Compare commits

...

50 Commits

Author SHA1 Message Date
9bfc5a596b ansible : add alloy role 2026-02-24 10:35:08 +00:00
fc45817240 adding argoCD 2026-02-05 09:25:52 +01:00
cf5c5a076e ansible: add vim role 2025-09-15 13:59:36 +02:00
aed7efed71 Ajouter postgresql/urls.md 2025-08-03 21:50:31 +02:00
e4359ca7f9 feat(misc): git 2025-07-31 16:36:41 +02:00
ea32d22dd3 adding misc commands 2025-07-16 16:58:06 +02:00
3e8afbbfb6 kubernetes: update cli 2025-06-05 14:35:12 +02:00
eeeaa55aae kubernetes: update cli 2025-06-04 17:21:39 +02:00
ce39cc3736 add bash section 2025-06-04 09:40:29 +02:00
f6ac190c95 adding pulumi section 2025-06-04 09:34:44 +02:00
7473da14f4 feat(terraform): adding cli file 2025-06-02 14:41:13 +02:00
e5738c5c3f fluxcd : adding repo example with some apps 2025-05-28 16:00:46 +02:00
00a5e56c27 kubernetes : add kubectl krew 2025-05-28 14:14:07 +02:00
5620165b33 terraform: add applications module 2025-05-28 12:02:11 +02:00
7bf1db48f7 terraform: del applications module 2025-05-28 12:01:41 +02:00
dc24dae779 terraform: add buckets module 2025-05-28 12:01:19 +02:00
62148d2af5 terraform: add applications module 2025-05-28 12:00:51 +02:00
d338ccc850 fix unicode 2025-05-28 11:54:17 +02:00
0d5f9eff7e add gitlab section 2025-05-28 11:53:16 +02:00
e96e220869 add postgresql role 2025-05-28 11:31:55 +02:00
24465cb6f9 add mongodb role 2025-05-28 11:23:06 +02:00
dd1900fffe add unbound_resolver role 2025-05-28 11:15:47 +02:00
6679277418 ssh_host_keys 2025-05-28 11:09:56 +02:00
f0a78236c7 linting vault role 2025-05-27 10:04:05 +02:00
50edb313f7 linting varnish role 2025-05-27 10:03:02 +02:00
0b37136364 linting rabbitmq ansible role 2025-05-27 09:58:15 +02:00
44e2674cc4 linting role opcache 2025-05-27 09:51:30 +02:00
0bb618a4a3 linting ansible neo4j 2025-05-27 09:49:05 +02:00
1ff53c6424 linting ansible/roles/filebrowser 2025-05-27 09:46:59 +02:00
526ec984cc update gitignore 2025-05-27 09:41:01 +02:00
9128da538e linting docker, add gitignore 2025-05-27 09:40:27 +02:00
2d378d51a7 ansible: linting bastion 2025-05-27 09:40:27 +02:00
6a3804dd86 Add README 2025-05-26 21:45:13 +02:00
c442162eac update vault cli 2025-05-26 10:00:13 +02:00
98c3628668 add rabbitmq ansible role 2025-05-22 17:19:52 +02:00
8bc817540d add rabbitmq ansible role 2025-05-22 17:13:15 +02:00
1a541ff03a add docker ansible role 2025-05-22 16:22:15 +02:00
fad7538f84 add neo4j ansible role 2025-05-22 16:06:45 +02:00
b6c92a7a8d adding filebrowser role 2025-05-22 15:39:00 +02:00
69ae923d1b add terraform 2025-05-22 15:32:40 +02:00
e245b99487 Revert "test pre-commit"
This reverts commit c806506a0a.
2025-05-22 15:24:14 +02:00
c806506a0a test pre-commit 2025-05-22 15:23:49 +02:00
3633bd7853 cleaning 2025-05-22 15:10:57 +02:00
6b8c90605d add vault role 2025-05-22 15:05:45 +02:00
3431c8b361 adding ansible opcache 2025-05-22 14:46:57 +02:00
0985d65f86 Adding ansible section 2025-05-22 14:43:42 +02:00
8c6e940433 add fluxcd 2025-05-22 11:57:14 +02:00
0e58abfb29 adding script dir, vault scripts 2025-05-22 10:56:53 +02:00
a9f91bb31c adding vault sidecar injector config 2025-05-22 10:14:02 +02:00
e697896c18 varnish 2025-05-21 11:33:48 +02:00
281 changed files with 25598 additions and 0 deletions

2
.gitignore vendored
View File

@@ -1 +1,3 @@
ansible/.ansible
ansible/.ansible-lint
*.swp

3
README.md Normal file
View File

@@ -0,0 +1,3 @@
## Documentation
This repository aims to track all the useful commands, debug tricks and hidden facts about apps I work with.

4
ansible/ansible-lint Normal file
View File

@@ -0,0 +1,4 @@
---
skip_list:
- yaml[line-length]

View File

@@ -0,0 +1,14 @@
---
# Defaults for the alloy role (Grafana Alloy collector).
alloy_version: "1.13.2"
# Verbosity used by the logging block of the generated config (config.j2).
alloy_log_level: "info"
# Service account the systemd unit runs as.
alloy_username: "alloy"
alloy_groupname: "alloy"
alloy_uid: "1001"
alloy_gid: "1001"
alloy_config_directory: "/etc/alloy"
alloy_config_file: "{{ alloy_config_directory }}/config.alloy"
# Used both as --storage.path and as WorkingDirectory of the unit.
alloy_working_directory: "/var/lib/alloy"
alloy_binary_dir: "/usr/local/bin"
# Extra CLI arguments appended to "alloy run" via /etc/default/alloy.
alloy_run_args: "--stability.level=experimental --server.http.listen-addr='0.0.0.0:12345'"
alloy_restart_on_upgrade: true

View File

@@ -0,0 +1,10 @@
---
# Notified by tasks/main.yml after config or environment-file changes.
- name: Restart alloy
  ansible.builtin.systemd_service:
    name: "alloy.service"
    state: restarted
# Re-read unit files after /etc/systemd/system changes.
# NOTE(review): no visible task notifies this handler; the service task in
# tasks/main.yml uses daemon_reload directly instead — confirm it is needed.
- name: Systemd reload
  ansible.builtin.systemd_service:
    daemon_reload: true

View File

@@ -0,0 +1,85 @@
---
# Install Grafana Alloy from the upstream release archive, create its service
# account, deploy its configuration and register the systemd unit.
- name: Download and install Alloy binary
  ansible.builtin.unarchive:
    src: "https://github.com/grafana/alloy/releases/download/v{{ alloy_version }}/alloy-linux-amd64.zip"
    dest: "{{ alloy_binary_dir }}"
    remote_src: true
  tags: install
- name: Rename binary
  ansible.builtin.copy:
    src: "{{ alloy_binary_dir }}/alloy-linux-amd64"
    dest: "{{ alloy_binary_dir }}/alloy"
    remote_src: true
    # Quote octal modes: a bare 0755 is read by YAML as the integer 493.
    mode: "0755"
  tags: install
- name: Create Alloy group
  ansible.builtin.group:
    name: "{{ alloy_groupname }}"
    gid: "{{ alloy_gid }}"
  tags: install
- name: Create Alloy user
  ansible.builtin.user:
    name: "{{ alloy_username }}"
    uid: "{{ alloy_uid }}"
    group: "{{ alloy_groupname }}"
    # No interactive login for the service account.
    shell: "/bin/false"
  tags: install
- name: Ensure config directory exists
  ansible.builtin.file:
    path: "{{ alloy_config_directory }}"
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    state: directory
    mode: "0755"
  tags: install,config
- name: Ensure working directory exists
  ansible.builtin.file:
    path: "{{ alloy_working_directory }}"
    state: directory
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    mode: "0755"
  tags: install
- name: Deploy Alloy config
  ansible.builtin.template:
    src: "config.j2"
    dest: "{{ alloy_config_directory }}/config.alloy"
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    mode: "0644"
  notify: Restart alloy
  tags: install,config
- name: Deploy Alloy default file
  ansible.builtin.template:
    src: "default.j2"
    dest: "/etc/default/alloy"
    owner: "root"
    group: "root"
    mode: "0644"
  notify: Restart alloy
  tags: install,config
- name: Deploy Alloy systemd config
  ansible.builtin.template:
    src: "systemd_service.j2"
    dest: "/etc/systemd/system/alloy.service"
    owner: "root"
    group: "root"
    mode: "0644"
  tags: install
- name: Start and enable Alloy service
  ansible.builtin.systemd_service:
    name: "alloy.service"
    state: started
    enabled: true
    # Picks up a freshly deployed unit file on first run.
    daemon_reload: true
  tags: install

View File

@@ -0,0 +1,34 @@
// Tail /in/*.log, parse "<timestamp> <message>" lines and ship them to the
// syslog exporter.
otelcol.receiver.filelog "file" {
  include = ["/in/*.log"]
  operators = [{
    type = "regex_parser",
    regex = `^(?P<timestamp>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{2}:\d{2}) (?P<msg>.*)$`,
    // NOTE(review): the regex's trailing "hh:mm" looks like a UTC offset,
    // but the layout ends in %j (day of year) — confirm %z was not intended.
    timestamp = {
      parse_from = "attributes.timestamp",
      layout = "%Y-%m-%dT%H:%M:%S%j",
    },
    message = {
      parse_from = "attributes.msg",
    },
  },{
    // Drop the raw timestamp attribute once it has been promoted.
    type = "remove",
    field = "attributes.timestamp",
  }]
  output {
    // NOTE(review): only the syslog exporter is wired in; "outfile" and
    // "default" below are declared but unused — confirm that is intended.
    logs = [otelcol.exporter.syslog.outsyslog.input]
  }
}
otelcol.exporter.file "outfile" {
  path = "/out/output.log"
}
otelcol.exporter.syslog "outsyslog" {
  // NOTE(review): hard-coded hostname; presumably should be a role variable.
  endpoint = "slaithier-promax14"
}
otelcol.exporter.debug "default" {}
logging {
  level = "{{ alloy_log_level }}"
  format = "logfmt"
}

View File

@@ -0,0 +1,8 @@
# The configuration file holding the Grafana Alloy configuration.
CONFIG_FILE="{{ alloy_config_file }}"
# User-defined arguments to pass to the run command.
CUSTOM_ARGS="{{ alloy_run_args }}"
# Restart on system upgrade. Defaults to true.
RESTART_ON_UPGRADE={{ alloy_restart_on_upgrade }}

View File

@@ -0,0 +1,19 @@
[Unit]
Description=Vendor-neutral programmable observability pipelines.
Documentation=https://grafana.com/docs/alloy/
Wants=network-online.target
After=network-online.target
[Service]
Restart=always
User={{ alloy_username }}
Group={{ alloy_groupname }}
Environment=HOSTNAME=%H
# CONFIG_FILE and CUSTOM_ARGS are defined in this environment file
# (rendered from default.j2 by the role).
EnvironmentFile=/etc/default/alloy
WorkingDirectory={{ alloy_working_directory }}
ExecStart={{ alloy_binary_dir }}/alloy run $CUSTOM_ARGS --storage.path={{ alloy_working_directory }} $CONFIG_FILE
# SIGHUP asks Alloy to reload its configuration in place.
ExecReload=/usr/bin/env kill -HUP $MAINPID
TimeoutStopSec=20s
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,56 @@
# Bastion deployment role
## Description
Ce rôle a pour but de créer un bastion ssh avec des configurations sécurisées au niveau réseau (nftables et fail2ban) et système (ssh,sudoers).
- Déploie les utilisateurs et leur clé publique.
- Déploie les clés publiques autorisées à se connecter au compte root, limitées aux connexions depuis 10.17.0.0/16.
- Déploie la configuration sudoers pour que les utilisateurs puissent se connecter au compte root.
- Déploie la configuration nftables et fail2ban.
- Déploie la configuration ssh.
## Variables
- private_networks: Réseaux privés utilisés pour l'administration.
- allowed_networks: Réseaux en liste blanche dans fail2ban.
- admin_users: Liste des utilisateurs autorisés à se connecter avec leur clé publique.
- rundeck_user: Clé publique de rundeck_inf
- dev_users: Liste des utilisateurs normaux à créer
- fail2ban_ignore_ips : Liste des IPs/network à ignorer pour fail2ban
## Installation
1. Installer la machine à partir d'un template existant.
2. Désactiver la configuration par DHCP sur l'interface privée
```bash
vim /etc/network/interfaces
iface <interface> inet static
address <private_ip/netmask>
```
3. ansible-playbook -i hosts-dmz playbooks/bastion.yml -t all -l <hostname>
## Usage
- Déployer un bastion complet
```
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t all
```
- Modifier les configurations de pare-feu
```
vim roles/bastion/templates/nftables.conf.j2
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t firewall
```
- Modifier/Ajouter un utilisateur
```
vim group_vars/all
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t users,ssh
```
- Modifier la configuration SSH
```
vim roles/bastion/templates/sshd_config.j2
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t ssh
```
## Questions/TODO
- SSH 2FA

View File

@@ -0,0 +1,10 @@
---
# Defaults for the bastion role; see the role README for details.
# Admin networks: used as an authorized_keys "from=" restriction for root.
bastion_private_networks: ""
bastion_allowed_networks: ""
# IPs/networks whitelisted in fail2ban.
bastion_fail2ban_ignore_ips: ""
bastion_dev_users: []
bastion_admin_users: []
bastion_rundeck_users: []
# Central syslog target for auth logs (templates/auth.conf.j2).
bastion_log_server: ""

View File

@@ -0,0 +1,25 @@
---
# Handlers for the bastion role. The ssh/rsyslog entries previously used the
# "service:" alias of the systemd module's "name" option; use "name:"
# consistently across all handlers.
- name: Reload nftables
  ansible.builtin.systemd:
    name: nftables
    state: reloaded
- name: Reload fail2ban
  ansible.builtin.systemd:
    name: fail2ban
    state: reloaded
- name: Restart fail2ban
  ansible.builtin.systemd:
    name: fail2ban
    state: restarted
- name: Restart ssh
  ansible.builtin.systemd:
    name: sshd
    state: restarted
- name: Restart rsyslog
  ansible.builtin.systemd:
    name: rsyslog
    state: restarted

View File

@@ -0,0 +1,46 @@
---
# Firewall stack for the bastion: nftables ruleset plus a fail2ban sshd jail.
- name: Ensure nftables service is started and enabled
  ansible.builtin.systemd:
    name: nftables
    state: started
    enabled: true
  tags: all,firewall
- name: Deploying nftables configuration
  ansible.builtin.template:
    src: nftables.conf.j2
    dest: /etc/nftables.conf
    owner: root
    group: root
    mode: "0755"
    backup: true
    # Reject a broken ruleset before it replaces the live one.
    validate: "nft -c -f %s"
  notify:
    - Reload nftables
    - Restart fail2ban
  tags: all,firewall
- name: Ensure fail2ban is installed
  ansible.builtin.apt:
    name: fail2ban
    state: present
    update_cache: true
  tags: all,firewall
- name: Ensure fail2ban is enabled and running
  ansible.builtin.systemd:
    name: fail2ban
    state: started
    enabled: true
  tags: all,firewall
- name: Deploying fail2ban ssh conf
  ansible.builtin.template:
    src: sshd.conf.j2
    dest: /etc/fail2ban/jail.d/sshd.conf
    owner: root
    group: root
    mode: "0644"
  # Handler names are matched exactly: "reload fail2ban" (lowercase) did not
  # match the "Reload fail2ban" handler, so jail changes were never applied.
  notify: Reload fail2ban
  tags: all,firewall

View File

@@ -0,0 +1,13 @@
---
# Entry point of the bastion role: users first, then firewall, then ssh.
- name: User deployment related tasks
  ansible.builtin.include_tasks: users.yml
  tags: all,users,rootonly
- name: Firewall deployment related tasks
  ansible.builtin.include_tasks: firewall.yml
  tags: all,firewall
- name: Ssh deployment related tasks
  ansible.builtin.include_tasks: ssh.yml
  tags: all,ssh

View File

@@ -0,0 +1,28 @@
---
# Harden sshd and forward auth logs to the central syslog server.
- name: Ensure sshd is enabled
  ansible.builtin.systemd:
    name: sshd
    enabled: true
  tags: all,ssh
- name: Deploy sshd_config
  ansible.builtin.template:
    src: sshd_config.j2
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: "0644"
    # Refuse to install a config that sshd cannot parse.
    validate: "sshd -T -f %s"
  notify: Restart ssh
  tags: all,ssh
- name: Deploy rsyslog conf
  ansible.builtin.template:
    src: auth.conf.j2
    dest: /etc/rsyslog.d/auth.conf
    owner: root
    group: root
    mode: "0644"
  notify: Restart rsyslog
  tags: all,ssh

View File

@@ -0,0 +1,53 @@
---
# Create admin/dev accounts, sudo configuration and authorized_keys
# (including network-restricted root access for admins).
- name: Installing sudo
  ansible.builtin.apt:
    name: sudo
    update_cache: true
    state: present
  tags: all,users
- name: Adding targetpw directive for sudo
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    line: "{{ item }}"
    state: present
  with_items:
    - "Defaults targetpw"
    - "Defaults insults"
  tags: all,users
- name: Creating admin users
  ansible.builtin.user:
    name: "{{ item.username }}"
    shell: /bin/bash
    groups: sudo
  with_items: "{{ bastion_admin_users }}"
  tags: all,users
- name: Creating dev users
  ansible.builtin.user:
    name: "{{ item.username }}"
    shell: /bin/bash
    groups: sudo
  with_items: "{{ bastion_dev_users }}"
  tags: all,users
- name: Adding authorized_keys for regular users
  ansible.posix.authorized_key:
    user: "{{ item.username }}"
    key: "{{ item.public_key }}"
    state: "{{ item.state | default('present') }}"
  with_items:
    - "{{ bastion_admin_users }}"
    - "{{ bastion_dev_users }}"
  tags: all,users
- name: Adding authorized_keys for root users
  ansible.posix.authorized_key:
    user: "root"
    key: "{{ item.public_key }}"
    # Root logins are only accepted from the admin networks.
    key_options: 'from="{{ bastion_private_networks }}"'
    # Default to 'present' like the regular-user task above, so user entries
    # without an explicit state no longer fail on an undefined attribute.
    state: "{{ item.state | default('present') }}"
  with_items: "{{ bastion_admin_users }}"
  tags: all,users,rootonly

View File

@@ -0,0 +1,2 @@
#{{ ansible_managed }}
# Role variables carry the "bastion_" prefix (defaults/main.yml); the
# unprefixed "log_server" used here was never defined.
auth,authpriv.* @{{ bastion_log_server }}

View File

@@ -0,0 +1,32 @@
#{{ ansible_managed }}
#!/usr/sbin/nft -f
flush ruleset
table inet filter {
  chain input {
    type filter hook input priority 0; policy drop;
    iifname lo accept;
    tcp dport 22 accept;
    icmp type echo-request accept;
    # established/related connections
    ct state established,related accept
  }
  chain forward {
    type filter hook forward priority 0; policy drop;
  }
  chain output {
    type filter hook output priority 0; policy drop;
    # In the output hook packets have no input interface, so the previous
    # "iifname lo" never matched and locally generated loopback traffic
    # was dropped; match on the output interface instead.
    oifname lo accept;
    tcp dport 22 accept;
    tcp dport {80, 443, 9200} accept;
    tcp dport {53, 123} accept;
    udp dport {53, 123, 1514} accept;
    icmp type echo-request accept;
    # established/related connections
    ct state established,related accept;
  }
}

View File

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
[DEFAULT]
# Role variables carry the "bastion_" prefix (defaults/main.yml); the
# unprefixed "fail2ban_ignore_ips" used here was never defined.
ignoreip = {{ bastion_fail2ban_ignore_ips }}
findtime = 3600
bantime = 86400
maxretry = 3
banaction = nftables-multiport
banaction_allports = nftables-allports
[sshd]
enabled = true

View File

@@ -0,0 +1,95 @@
Include /etc/ssh/sshd_config.d/*.conf
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
#LoginGraceTime 2m
PermitRootLogin without-password
#StrictModes yes
#MaxAuthTries 3
#MaxSessions 10
#PubkeyAuthentication yes
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
PasswordAuthentication no
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
ClientAliveInterval 300
ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
# Per-user Match blocks. Role variables carry the "bastion_" prefix
# (defaults/main.yml); the unprefixed admin_users/dev_users were undefined
# and broke templating.
{% for item in bastion_admin_users -%}
Match User {{ item.username }}
{% endfor %}
{% for item in bastion_dev_users -%}
Match User {{ item.username }}
{% endfor %}

View File

@@ -0,0 +1,42 @@
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# disabled_plugins = ["cri"]
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true

View File

@@ -0,0 +1,6 @@
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-opts": {
"max-size": "10m"
}
}

View File

@@ -0,0 +1,16 @@
---
# Handlers for the docker role, notified by the config-deploy and
# multipath tasks in tasks/main.yml.
- name: Restart containerd
  ansible.builtin.systemd:
    name: containerd
    state: restarted
- name: Restart docker
  ansible.builtin.systemd:
    name: docker
    state: restarted
- name: Restart multipathd
  ansible.builtin.systemd:
    name: multipathd
    state: restarted

View File

@@ -0,0 +1,139 @@
---
- name: Set specific variables for distributions
ansible.builtin.include_vars: '{{ item }}'
with_first_found:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}.yml'
- '{{ ansible_os_family }}.yml'
- default.yml
- name: Suppression anciennes versions de docker
ansible.builtin.apt:
pkg:
- docker
- docker-engine
- docker.io
state: absent
- name: Installation des prérequis
ansible.builtin.apt:
pkg:
- apt-transport-https
- ca-certificates
- curl
- gnupg2
- software-properties-common
- nfs-common
state: present
update_cache: true
- name: Ajout de la clef GPG docker.com
ansible.builtin.apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Ajout du repo APT docker.com
ansible.builtin.apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable
state: present
filename: docker
- name: Installation de docker dans la version {{ docker_target_version }}
ansible.builtin.apt:
pkg:
- docker-ce={{ docker_target_version }}
- docker-ce-cli={{ docker_target_version }}
state: present
update_cache: true
register: apt_out
# Show apt output for troubleshooting. stdout_lines/stderr_lines are absent
# when apt made no change, so default to [] instead of erroring on an
# undefined attribute. The old "failed_when: apt_out.rc != 0" was dead logic
# (a failed apt task above already aborts the play, and rc is undefined on
# success, making the expression itself fail).
- name: Affichage sortie du module apt
  ansible.builtin.debug:
    msg:
      - "{{ apt_out.stdout_lines | default([]) }}"
      - "{{ apt_out.stderr_lines | default([]) }}"
- name: Verrouillage des paquets docker-ce
ansible.builtin.dpkg_selections:
name: "{{ item }}"
selection: hold
with_items:
- docker-ce
- docker-ce-cli
- containerd.io
- name: Adding cleaning cron
ansible.builtin.cron:
minute: "0"
hour: "0"
job: "/usr/bin/docker image prune -a -f >/dev/null 2>&1"
name: "image-prune"
user: "root"
cron_file: image-prune
- name: Augmentation des valeurs systeme inotify max_user_instances
ansible.posix.sysctl:
name: fs.inotify.max_user_instances
value: '4096'
sysctl_set: true
state: present
reload: true
- name: Augmentation des valeurs systeme inotify max_user_watches
ansible.posix.sysctl:
name: fs.inotify.max_user_watches
value: '2097152'
sysctl_set: true
state: present
reload: true
- name: Désactivation du swap
ansible.posix.sysctl:
name: vm.swappiness
value: '1'
sysctl_set: true
state: present
reload: true
# Deploy containerd and docker daemon configuration. These are plain config
# files, so they do not need the execute bit the previous 0755 granted.
- name: Customise containerd file config.toml
  ansible.builtin.copy:
    src: config.toml
    dest: /etc/containerd/config.toml
    owner: root
    group: root
    mode: "0644"
  notify: Restart containerd
- name: Customise docker file daemon.json
  ansible.builtin.copy:
    src: daemon.json
    dest: /etc/docker/daemon.json
    owner: root
    group: root
    mode: "0644"
  notify: Restart docker
- name: Adding Multipathd blacklist for longhorn support
ansible.builtin.blockinfile:
path: /etc/multipath.conf
block: |
# https://longhorn.io/kb/troubleshooting-volume-with-multipath/
blacklist {
devnode "^sd[a-z0-9]+"
}
notify: Restart multipathd
- name: Start and enable iscsi daemon for longhorn support
ansible.builtin.systemd_service:
name: iscsid
enabled: true
state: started
- name: Enable iscsi_tcp kernel module for longhorn support
community.general.modprobe:
name: iscsi_tcp
state: present

View File

@@ -0,0 +1,3 @@
---
docker_target_version: "5:24.0.7-1~debian.12~bookworm"

View File

@@ -0,0 +1,3 @@
---
docker_target_version: "5:18.09.9~3-0~ubuntu-bionic"

View File

@@ -0,0 +1,3 @@
---
docker_target_version: "5:20.10.9~3-0~ubuntu-focal"

View File

@@ -0,0 +1,3 @@
---
docker_target_version: "5:20.10.23~3-0~ubuntu-jammy"

View File

@@ -0,0 +1,3 @@
---
docker_target_version: "5:27.5.1-1~ubuntu.24.04~noble"

View File

@@ -0,0 +1,10 @@
---
filebrowser_version: "2.32.0"
filebrowser_user: "admin"
filebrowser_group: "admin"
filebrowser_port: "8080"
filebrowser_address: "0.0.0.0"
filebrowser_log_dir: "/var/log/filebrowser"
filebrowser_config_dir: "/etc/filebrowser"
filebrowser_root: "/nas/{{ env_long }}"

View File

@@ -0,0 +1,10 @@
/var/log/filebrowser/filebrowser.log
{
rotate 7
daily
missingok
dateext
copytruncate
notifempty
compress
}

View File

@@ -0,0 +1,2 @@
if $programname == 'filebrowser' then /var/log/filebrowser/filebrowser.log
&stop

View File

@@ -0,0 +1,10 @@
---
# Re-read unit files after the service template is (re)deployed.
- name: Daemon reload
  ansible.builtin.systemd_service:
    daemon_reload: true
# Notified after settings.json changes.
- name: Restart
  ansible.builtin.systemd_service:
    name: filebrowser.service
    state: restarted

View File

@@ -0,0 +1,90 @@
---
- name: Create download folder
ansible.builtin.file:
path: "/tmp/filebrowser"
state: directory
mode: "0755"
- name: Download Filebrowser binary from github
ansible.builtin.unarchive:
src: "https://github.com/filebrowser/filebrowser/releases/download/v{{ filebrowser_version }}/linux-amd64-filebrowser.tar.gz"
dest: "/tmp/filebrowser"
remote_src: true
- name: Moving filebrowser binary to /usr/local/bin
ansible.builtin.copy:
src: /tmp/filebrowser/filebrowser
dest: /usr/local/bin/filebrowser
owner: root
group: root
mode: "0755"
remote_src: true
- name: Create configuration folder
ansible.builtin.file:
path: "{{ filebrowser_config_dir }}"
owner: "{{ filebrowser_user }}"
group: "{{ filebrowser_group }}"
mode: "0755"
state: directory
- name: Deploying configuration file
ansible.builtin.template:
src: "settings.json.j2"
dest: "{{ filebrowser_config_dir }}/settings.json"
owner: "{{ filebrowser_user }}"
group: "{{ filebrowser_group }}"
mode: "0644"
notify: Restart
- name: Deploying service file
ansible.builtin.template:
src: "filebrowser.service.j2"
dest: "/lib/systemd/system/filebrowser.service"
owner: "root"
group: "root"
mode: "0644"
notify:
- Daemon reload
- name: Running handlers for daemon reload
ansible.builtin.meta: flush_handlers
- name: Enabling filebrowser service
ansible.builtin.systemd:
name: filebrowser.service
state: started
enabled: true
- name: Create log folder
ansible.builtin.file:
path: "{{ filebrowser_log_dir }}"
owner: root
group: adm
mode: "0755"
state: directory
- name: Deploy rsyslog config
ansible.builtin.copy:
src: rsyslog.conf
dest: /etc/rsyslog.d/filebrowser.conf
owner: root
group: root
mode: "0644"
- name: Deploy logrotate config
ansible.builtin.copy:
src: logrotate.conf
dest: /etc/logrotate.d/filebrowser
owner: root
group: root
mode: "0644"
- name: Create custom branding folder
ansible.builtin.file:
path: "/etc/filebrowser/branding/img/icons"
owner: "{{ filebrowser_user }}"
group: "{{ filebrowser_group }}"
mode: "0755"
state: directory

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Filebrowser
Documentation=https://filebrowser.org
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
# Role variables carry the "filebrowser_" prefix (defaults/main.yml); the
# fb_* names used before were never defined.
User={{ filebrowser_user }}
Group={{ filebrowser_group }}
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/filebrowser --config {{ filebrowser_config_dir }}/settings.json
SyslogIdentifier=filebrowser
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,8 @@
{
  "port": {{ filebrowser_port }},
  "baseURL": "",
  "address": "{{ filebrowser_address }}",
  "log": "{{ filebrowser_log_dir }}/filebrowser.log",
  "database": "{{ filebrowser_config_dir }}/filebrowser.db",
  "root": "{{ filebrowser_root }}"
}

View File

@@ -0,0 +1,27 @@
# Installation et configuration de mongoDB
## Configuration
La configuration se fait via le fichier ansible/group_vars/{{ nom_du_groupe }}.
## Variables
* mongodb_replicaset_name : Nom du replicaset configuré entre les serveurs. (Exemple: mongodb-stg)
## Fonctionnalités
* Installe les dépendances du rôle et de mongodb, le dépot MongoDB 6, les paquets mongodb.
* Déploie les outils de backups.
* Déploie la configuration relative à la supervision (check, fichier d'authentification et rôle custom).
## Tags
* install : installe mongodb, la supervision, les backups et les utilisateurs.
* supervision : met à jour les éléments relatifs à la supervision (check, configuration, rôle custom).
* backup: déploie les outils nécessaires aux sauvegardes (scripts, role, utilisateur, cron).
## Modification de configuration
* Mise à jour des éléments de supervision :
```
ansible-playbook -i hosts-stg playbooks/mongodb.yml -t supervision -l mongodb_stg
```

View File

@@ -0,0 +1,5 @@
---
mongodb_nfs_server_stg: ""
mongodb_nfs_server_prd: ""
mongodb_cmk_url: ""

View File

@@ -0,0 +1,9 @@
[Unit]
Description=Disable Transparent Huge Pages (THP)
DefaultDependencies=no
After=sysinit.target local-fs.target
[Service]
Type=oneshot
ExecStart=/bin/sh -c 'echo never | tee /sys/kernel/mm/transparent_hugepage/enabled > /dev/null'
[Install]
WantedBy=basic.target

View File

@@ -0,0 +1,2 @@
vm.swappiness=1
vm.max_map_count=128000

View File

@@ -0,0 +1,13 @@
/var/log/mongodb/mongod.log {
daily
rotate 7
missingok
compress
delaycompress
notifempty
create 640 mongodb mongodb
sharedscripts
postrotate
/bin/kill -SIGUSR1 `cat /var/run/mongodb/mongod.pid 2>/dev/null` >/dev/null 2>&1
endscript
}

View File

@@ -0,0 +1,15 @@
---
# Re-read unit files after systemd unit changes (e.g. disable-thp.service).
- name: Systemd daemon_reload
  ansible.builtin.systemd_service:
    daemon_reload: true
# Notified after /etc/mongod.conf is redeployed.
- name: Restart mongodb
  ansible.builtin.systemd_service:
    name: mongod
    state: restarted
- name: Restart pbm-agent
  ansible.builtin.systemd_service:
    name: pbm-agent
    state: restarted

View File

@@ -0,0 +1,124 @@
---
- name: Install dependencies
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- gnupg2
- lsb-release
- nfs-common
tags: install,backup
- name: Ensure nas directory exists
ansible.builtin.file:
path: /nas
state: directory
owner: root
group: root
mode: "0755"
tags: install,backup
- name: Create backup custom role
community.mongodb.mongodb_role:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
replica_set: "{{ mongodb_replicaset_name }}"
database: "admin"
name: "pbmAnyAction"
privileges:
- resource:
db: ""
collection: ""
actions:
- "anyAction"
roles:
- role: "backup"
db: "admin"
- role: "clusterMonitor"
db: "admin"
- role: "restore"
db: "admin"
- role: "readWrite"
db: "admin"
state: present
tags: install,backup
- name: Create backup user
community.mongodb.mongodb_user:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
replica_set: "{{ mongodb_replicaset_name }}"
database: "admin"
name: "backup"
password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/users/backup:password') }}"
roles: "pbmAnyAction"
auth_mechanism: "SCRAM-SHA-256"
state: "present"
update_password: on_create
tags: install,backup
# Mount the per-environment NFS backup share. "=" is assignment in Jinja and
# is invalid inside a "when" expression; comparisons must use "==".
- name: Add nas mounting to fstab
  ansible.posix.mount:
    src: "{{ mongodb_nfs_server_stg }}:/data/shares/mongodb"
    path: "/nas"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    state: present
  when: dbenv == "stg"
  tags: install,backup,nfs
- name: Add nas mounting to fstab
  ansible.posix.mount:
    src: "{{ mongodb_nfs_server_prd }}:/data/shares/mongodb"
    path: "/nas"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    state: present
  when: dbenv == "prd"
  tags: install,backup,nfs
- name: Ensure scripts directory exists
ansible.builtin.file:
path: /data/scripts
state: directory
owner: root
group: root
mode: "0755"
tags: install,backup
- name: Deploy backup script
ansible.builtin.template:
src: mongodb-dump-full.sh.j2
dest: /data/scripts/mongodb-dump-full.sh
owner: root
group: root
mode: "0750"
tags: install,backup
- name: Add cron to trigger backup
ansible.builtin.cron:
name: "mongodb-dump-full"
weekday: "*"
minute: "0"
hour: "02"
user: root
job: "/data/scripts/mongodb-dump-full.sh -r 14 -d /nas -c"
cron_file: mongodb-dump-full
disabled: true
tags: install,backup
- name: Add MAILTO variable to cronfile
community.general.cronvar:
name: MAILTO
value: "''"
cron_file: mongodb-dump-full
state: present
tags: install,backup
- name: Add check batch conf to checkmk
ansible.builtin.lineinfile:
path: /etc/check_mk/mrpe.cfg
line: "#script_mongodb-dump-databases.sh /usr/local/nagios/plugins/check_batch mongodb-dump-full.sh 129600"
state: present
tags: install,backup

View File

@@ -0,0 +1,128 @@
---
- name: Install requirements
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- gnupg
- python3-pip
tags: install,conf,users
- name: Installing pymongo via pip
ansible.builtin.pip:
name:
- pymongo
tags: install,conf,users
- name: Deploy service to disable THP at boot
ansible.builtin.copy:
src: disable-thp.service
dest: /etc/systemd/system/disable-thp.service
owner: root
group: root
mode: "0755"
notify: Systemd daemon_reload
tags: install
- name: Enable disable-thp service
ansible.builtin.systemd:
name: disable-thp
enabled: true
masked: false
tags: install
- name: Deploy sysctl conf (max_map_count, swappiness)
ansible.builtin.copy:
src: local.conf
dest: /etc/sysctl.d/local.conf
owner: root
group: root
mode: "0644"
tags: install,conf
- name: Get mongodb.com gpg key
ansible.builtin.get_url:
url: https://pgp.mongodb.com/server-7.0.asc
dest: /usr/share/keyrings/mongodb-server-7.0.asc
owner: root
group: root
mode: "0644"
tags: install
- name: Add mongodb.com repository
ansible.builtin.apt_repository:
repo: "deb [ signed-by=/usr/share/keyrings/mongodb-server-7.0.asc] http://repo.mongodb.org/apt/debian bullseye/mongodb-org/7.0 main"
state: present
tags: install
- name: Install mongodb
ansible.builtin.apt:
name: mongodb-org
state: present
tags: install
- name: Holding mongodb packages
ansible.builtin.dpkg_selections:
name: "{{ item }}"
selection: hold
with_items:
- mongodb-org
- mongodb-org-database
- mongodb-org-server
- mongodb-mongosh
- mongodb-org-mongos
- mongodb-org-tools
tags: install
- name: Ensure permissions are correct on /var/lib/mongodb
  ansible.builtin.file:
    path: /var/lib/mongodb
    # Be explicit: with state omitted the file module defaults to "file",
    # which does not create the path and is wrong for a data directory.
    state: directory
    owner: mongodb
    group: mongodb
    mode: "0755"
  tags: install
- name: Start and enable mongodb service
ansible.builtin.systemd:
name: mongod
state: started
enabled: true
tags: install
- name: Deploy conf file
ansible.builtin.template:
src: mongod.conf.j2
dest: /etc/mongod.conf
owner: root
group: root
mode: "0644"
tags: install,conf
notify: Restart mongodb
- name: Deploy keyFile for auth in cluster
ansible.builtin.template:
src: mongo-keyfile.j2
dest: /etc/mongo-keyfile
owner: mongodb
group: mongodb
mode: "0400"
tags: install
- name: Deploy logrotate conf file
ansible.builtin.copy:
src: logrotate.conf
dest: /etc/logrotate.d/mongodb
owner: root
group: root
mode: "0644"
tags: install
- name: Create replicaset
community.mongodb.mongodb_replicaset:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
login_host: localhost
replica_set: "{{ mongodb_replicaset_name }}"
members: "{{ mongodb_replicaset_members }}"
tags: install

View File

@@ -0,0 +1,13 @@
---
# Entry point for the mongodb role: one include per concern so tags can target
# installation, monitoring wiring, or backup setup independently.
- name: Include install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install
- name: Include supervision tasks
  ansible.builtin.include_tasks: supervision.yml
  tags: install,supervision
- name: Include backup tasks
  ansible.builtin.include_tasks: backup.yml
  tags: install,backup

View File

@@ -0,0 +1,114 @@
---
# Wire the host into checkmk: plugin configuration + the agent-side plugin.
- name: Deploy checkmk conf template
  ansible.builtin.template:
    src: mk_mongodb.cfg.j2
    dest: /etc/check_mk/mk_mongodb.cfg
    owner: root
    group: root
    mode: "0644"
  tags: install
- name: Deploy checkmk mongo check
  ansible.builtin.get_url:
    # The plugin is fetched from the checkmk server itself so agent and server
    # versions stay in sync.
    url: https://{{ mongodb_checkmk_url }}/check_mk/agents/plugins/mk_mongodb.py
    dest: /usr/lib/check_mk_agent/plugins/
    owner: root
    group: root
    mode: "0755"
  tags: install
- name: Deploy supervision role
  community.mongodb.mongodb_role:
    login_user: "admin"
    # Vault path built with '~' concatenation: a nested "{{ env }}" inside the
    # quoted lookup term is NOT expanded by Jinja (moustaches don't nest).
    login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/admin:password') }}"
    replica_set: "{{ mongodb_replicaset_name }}"
    name: supervision
    database: admin
    # Grant collStats on the internal collections the checkmk plugin inspects.
    # db: "" targets the named collection in any database (MongoDB resource
    # document semantics).
    privileges:
      - resource:
          db: ""
          collection: "system.version"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.keys"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.roles"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.users"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.preimages"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.indexBuilds"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.rollback.id"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.views"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "system.replset"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "replset.initialSyncId"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "replset.election"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "replset.oplogTruncateAfterPoint"
        actions:
          - "collStats"
      - resource:
          db: ""
          collection: "replset.minvalid"
        actions:
          - "collStats"
    roles:
      - role: "clusterMonitor"
        db: "admin"
      - role: "readAnyDatabase"
        db: "admin"
    state: present
  tags: install,supervision
- name: Create checkmk mongodb user
  community.mongodb.mongodb_user:
    login_user: "admin"
    # Vault paths built with '~' concatenation: nested "{{ env }}" moustaches
    # inside a lookup term are NOT templated by Jinja.
    login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/admin:password') }}"
    database: "admin"
    name: "checkmk"
    password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/checkmk:password') }}"
    roles: "supervision"
    auth_mechanism: "SCRAM-SHA-256"
    replica_set: "{{ mongodb_replicaset_name }}"
    state: "present"
    # Never rotate the password on re-runs; only set it at creation time.
    update_password: on_create
  tags: install,supervision

View File

@@ -0,0 +1,10 @@
[MONGODB]
# all keys are optional
host = 127.0.0.1
# host defaults to localhost
username = checkmk
{# Vault path built with '~' concatenation: a nested "{{ env }}" inside the
   quoted lookup term is not expanded by Jinja and would reach Vault verbatim. #}
password = {{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/checkmk:password') }}
auth_source = admin
# auth_source defaults to admin
auth_mechanism = DEFAULT

View File

@@ -0,0 +1 @@
{{ lookup('hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/keyFile:key') }}

View File

@@ -0,0 +1,44 @@
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# How the process runs. Declared ONCE: the original file declared
# processManagement twice, which is invalid YAML — parsers either error out or
# silently keep only one of the two blocks (dropping pidFilePath or
# timeZoneInfo depending on which wins).
processManagement:
  pidFilePath: /var/run/mongodb/mongod.pid
  timeZoneInfo: /usr/share/zoneinfo
# Where and how to store data.
storage:
  dbPath: /var/lib/mongodb
#  engine:
#  wiredTiger:
# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  # reopen works with the logrotate config deployed by this role.
  logRotate: reopen
  path: /var/log/mongodb/mongod.log
# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0
security:
  keyFile: /etc/mongo-keyfile
  authorization: enabled
#operationProfiling:
replication:
  replSetName: {{ mongodb_replicaset_name }}
#sharding:
setParameter:
  transactionLifetimeLimitSeconds: 3600

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Dump all databases of the local MongoDB node to ${BACKUPDIR}, but only when
# the node is a replica-set secondary (never load the primary with backups).
# Usage: mongodb-dump-full.sh -r <retention> -d <dir> [-c]
set -eu
DATE=$(date +%Y%m%d)
HOSTNAME=$(hostname -s)
STATUS=0
LOGFILE="/var/tmp/mongodb-dump-databases.log"
HOST="localhost"
COMPRESS=false
USER="backup"
# Vault path built with '~' concatenation: a nested "{{ env }}" inside the
# quoted lookup term is not expanded by Jinja.
PASSWORD="{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/backup:password') }}"
# Auth options are common to both branches; only --gzip is conditional.
DUMP_OPTIONS="--authenticationDatabase=admin --username=${USER} --password=${PASSWORD}"
touch "${LOGFILE}"
#
# Fonctions
#
# Abort unless the NAS marker file is present under ${BACKUPDIR}.
# NOTE(review): this function was never called in the original script — kept
# as-is; call it after option parsing if the mount check is actually wanted.
checkNas()
{
    if [ ! -e "${BACKUPDIR}/.mount" ]; then
        echo "${BACKUPDIR} not mounted. Backup aborted." | tee -a "${LOGFILE}"
        exit 1
    fi
}
usage()
{
    echo "$0 -r <retention> -d <repertoire> -c (compression)"
    echo "Exemple : /data/scripts/mongodb-dump-full.sh -r 20 -d /nas -c"
}
#
# Main
#
while getopts "hcr:d:" option
do
    case "${option}"
    in
        r)
            RETENTION=${OPTARG};;
        d)
            BACKUPDIR=${OPTARG};;
        c)
            COMPRESS=true;;
        h | *)
            usage
            exit 1;;
    esac
done
echo "Lancement du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a "${LOGFILE}"
# Only dump from a secondary so the primary never carries the backup load.
SEC=$(mongosh --host="${HOST}" --authenticationDatabase admin --username "${USER}" --password "${PASSWORD}" --eval 'rs.hello().secondary' --quiet)
if [ ! "${SEC}" == "true" ]; then
    # typo fixed: "seconday" -> "secondary"
    echo "$(date +%s)|2|Node is not secondary ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
    exit 0
fi
[ -d "${BACKUPDIR}" ] || mkdir -p "${BACKUPDIR}"
# Bug fix: the original tested `[ "${COMPRESS}" ]`, which is always true for a
# non-empty string ("false" included), so -c had no effect. Compare against
# "true" explicitly, and only name the archive *.gz when it really is gzipped.
if [ "${COMPRESS}" = true ]; then
    DUMP_OPTIONS="${DUMP_OPTIONS} --gzip"
    ARCHIVE="${BACKUPDIR}/${DATE}-${HOSTNAME}.gz"
else
    ARCHIVE="${BACKUPDIR}/${DATE}-${HOSTNAME}.archive"
fi
# dump
mongodump -v --host="${HOST}" ${DUMP_OPTIONS} --archive="${ARCHIVE}" | tee -a "${LOGFILE}"
# $? after a pipeline is tee's exit status; PIPESTATUS[0] is mongodump's.
STATUS=${PIPESTATUS[0]}
# output in statusfile for checkmk
echo "$(date +%s)|${STATUS}|Check log file ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
echo "Fin du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a "${LOGFILE}"

View File

@@ -0,0 +1,5 @@
## Tags
* Install : Installe Neo4j et déploie son fichier de configuration.
* Config : Déploie le fichier de configuration.
* Backup : Déploie les configurations relatives aux sauvegardes.

View File

@@ -0,0 +1,4 @@
---
# Listen on all interfaces by default.
neo4j_default_listen_address: "0.0.0.0"
# Intentionally empty — override per host/group where an advertised address is needed.
neo4j_default_advertised_address: ""

View File

@@ -0,0 +1,89 @@
#!/bin/sh
# {{ ansible_managed }}
# Offline dump of the neo4j and system databases to ${BACKUPDIR}/neo4jdump:
# stops the service, dumps each database, restarts the service, and writes a
# status file consumed by the checkmk check_batch probe.
# Usage: neo4j-dump-databases -r <retention_days> -d <dir> [-c]
set -eu
NOW=$(date +%Y%m%d-%H%M%S)
HOSTNAME=$(hostname -s)
STATUS=0
EXITSTATUS=0
OUTPUT=""
LOGFILE="/data/log/scripts/neo4j-dump-databases.log"
COMPRESS=false
touch ${LOGFILE}
#
# Functions
#
# Abort unless the NAS marker file exists under ${BACKUPDIR}.
# NOTE(review): never called in the original script — kept as-is; invoke it
# after option parsing if the mount check is actually wanted.
checkNas()
{
    if [ ! -e "${BACKUPDIR}/.mount" ]; then
        echo "${BACKUPDIR} not mounted. Backup aborted." | tee -a ${LOGFILE}
        exit 1
    fi
}
usage()
{
    echo "$0 -r <retention> -d <repertoire> -c (compression)"
    echo "Exemple : /data/scripts/neo4j-dump-databases -r 20 -d /nas -c"
}
#
# Main
#
while getopts "hcr:d:" option
do
    case "${option}"
    in
        r)
            RETENTION=${OPTARG};;
        d)
            BACKUPDIR=${OPTARG};;
        c)
            COMPRESS=true;;
        h | *)
            usage
            exit 1;;
    esac
done
echo "[$(date '+%Y%m%d %H%M%S')] Lancement du dump neo4j de ${HOSTNAME} - Retention : ${RETENTION} - Repertoire de destination : ${BACKUPDIR}" | tee -a ${LOGFILE}
mkdir -p "$BACKUPDIR"/neo4jdump/
# Purge dumps older than the retention window before writing the new ones.
find "$BACKUPDIR"/neo4jdump/ -mindepth 1 -maxdepth 1 -type f -daystart -mtime +"$RETENTION" -delete
echo "[$(date '+%Y%m%d %H%M%S')][INFO] Arret de Neo4j" | tee -a ${LOGFILE}
systemctl stop neo4j.service
for db in neo4j system ; do
    echo "[$(date '+%Y%m%d %H%M%S')][INFO] dump de $db" | tee -a ${LOGFILE}
    # Bug fix: the original appended "| tee -a" AFTER the output redirection,
    # so tee received nothing and STATUS=$? reflected tee, not the dump. Also,
    # with `set -e` a failing dump aborted the script before the error path
    # could run. "|| STATUS=$?" records the failure without aborting.
    # (In POSIX sh a pipeline's status is the last command's, so in the gzip
    # branch a neo4j-admin failure with a successful gzip is still invisible.)
    STATUS=0
    if [ "$COMPRESS" = true ]; then
        neo4j-admin database dump --to-stdout $db | gzip -1 > "$BACKUPDIR"/neo4jdump/"${NOW}"_"${HOSTNAME}"_"${db}".dump.gz || STATUS=$?
    else
        neo4j-admin database dump --to-stdout $db > "$BACKUPDIR"/neo4jdump/"${NOW}"_"${HOSTNAME}"_"${db}".dump || STATUS=$?
    fi
    if [ ! ${STATUS} -eq 0 ]; then
        echo "[$(date '+%Y%m%d %H%M%S')][CRIT] neo4jdump failed" | tee -a ${LOGFILE}
        OUTPUT="${OUTPUT} neo4j"
        EXITSTATUS=1
        break
    fi
done
echo "[$(date '+%Y%m%d %H%M%S')][INFO] Demarrage de Neo4j" | tee -a ${LOGFILE}
systemctl start neo4j.service
# Status file read by checkmk; $(...) replaces the legacy backticks.
if [ ${EXITSTATUS} -eq 0 ]; then
    echo "$(date '+%s')|${EXITSTATUS}|Everything has been successfully backuped !" | tee /var/tmp/batch."$(basename "$0")"
else
    # Copy/paste fix: the failure message referred to "mysqldump".
    echo "$(date '+%s')|${EXITSTATUS}|Problem with neo4j dump backup (${OUTPUT}) !" | tee /var/tmp/batch."$(basename "$0")"
fi
echo "[$(date '+%Y%m%d %H%M%S')] Fin du dump neo4j de ${HOSTNAME} - Retention : ${RETENTION} - Repertoire de destination : ${BACKUPDIR}" | tee -a ${LOGFILE}
exit ${EXITSTATUS}

View File

@@ -0,0 +1,6 @@
---
# Handlers for the neo4j role.
- name: Restart neo4j
  # Triggered by configuration changes (neo4j.conf template).
  ansible.builtin.systemd_service:
    name: neo4j.service
    state: restarted

View File

@@ -0,0 +1,47 @@
---
- name: Setting up mount point for /nas
  ansible.posix.mount:
    path: "/nas"
    src: "{{ neo4j_nfs_server }}:/data/shares/neo4j"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    state: mounted
  tags: backup
- name: Ensure scripts folder exists
  ansible.builtin.file:
    path: "/data/scripts"
    owner: root
    group: root
    # Bug fix: was "0644" — a directory needs the execute bit to be entered or
    # traversed; without it the deployed script path is unreachable.
    mode: "0755"
    state: directory
  tags: backup
- name: Deploy backup script
  ansible.builtin.copy:
    src: "neo4j-dump-database.sh"
    dest: "/data/scripts/neo4j-dump-database.sh"
    owner: root
    group: root
    mode: "0750"
  tags: backup
- name: Setting up backup cron
  ansible.builtin.cron:
    name: "neo4j backup"
    minute: "0"
    hour: "04"
    job: "/data/scripts/neo4j-dump-database.sh -r 7 -d /nas -c"
    # Cron entry is shipped disabled; enable per host once validated.
    disabled: true
    user: root
    cron_file: neo4j-backup
    state: present
  tags: backup
- name: Adding checkmk config for backup script
  ansible.builtin.lineinfile:
    path: "/etc/check_mk/mrpe.cfg"
    regexp: "^#?neo4j_dump /usr/local/nagios/plugins/check_batch neo4j-dump-database.sh 129600"
    line: "#neo4j_dump /usr/local/nagios/plugins/check_batch neo4j-dump-database.sh 129600"
  tags: backup

View File

@@ -0,0 +1,56 @@
---
- name: Checking dependencies
  # Pass the whole list to the module so apt resolves everything in a single
  # transaction instead of one apt run per package (with_items loop).
  ansible.builtin.apt:
    name:
      - apt-transport-https
      - ca-certificates
      - gnupg2
      - openjdk-17-jdk
      - nfs-common
    state: present
  tags: install
- name: Adding neo4j pgp key
  ansible.builtin.get_url:
    url: "https://debian.neo4j.com/neotechnology.gpg.key"
    dest: "/usr/share/keyrings/neo4j.asc"
    mode: "0644"
    # force: always refresh so a rotated upstream key is picked up.
    force: true
  tags: install
- name: Adding repo
  ansible.builtin.apt_repository:
    repo: "deb [signed-by=/usr/share/keyrings/neo4j.asc] https://debian.neo4j.com stable latest"
    update_cache: true
    state: present
  tags: install
- name: Install package
  ansible.builtin.apt:
    name: "neo4j"
    state: present
  tags: install
- name: Holding package
  # Pin neo4j so routine apt upgrades cannot change the database version.
  ansible.builtin.dpkg_selections:
    name: neo4j
    selection: hold
  tags: install
- name: Enable neo4j service
  ansible.builtin.systemd_service:
    name: neo4j.service
    enabled: true
  tags: install
- name: Deploy neo4j.yml
  ansible.builtin.template:
    src: neo4j.conf.j2
    dest: "/etc/neo4j/neo4j.conf"
    owner: neo4j
    group: neo4j
    mode: "0660"
  tags: install,config
  notify: Restart neo4j

View File

@@ -0,0 +1,9 @@
---
# Entry point for the neo4j role.
- name: Include install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install,config
- name: Include backup tasks
  ansible.builtin.include_tasks: backup.yml
  tags: backup

View File

@@ -0,0 +1,196 @@
#*****************************************************************
# Neo4j configuration
#
# For more details and a complete list of settings, please see
# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/
#*****************************************************************
# The name of the default database
#initial.dbms.default_database=neo4j
# Paths of directories in the installation.
server.directories.data=/var/lib/neo4j/data
server.directories.plugins=/var/lib/neo4j/plugins
server.directories.logs=/var/log/neo4j
server.directories.lib=/usr/share/neo4j/lib
#server.directories.run=run
#server.directories.licenses=licenses
#server.directories.transaction.logs.root=data/transactions
server.logs.config=/etc/neo4j/server-logs.xml
server.logs.user.config=/etc/neo4j/user-logs.xml
server.directories.import=/var/lib/neo4j/import
dbms.usage_report.enabled=false
#********************************************************************
# Memory Settings
#********************************************************************
#
# Memory settings are specified kibibytes with the 'k' suffix, mebibytes with
# 'm' and gibibytes with 'g'.
# If Neo4j is running on a dedicated server, then it is generally recommended
# to leave about 2-4 gigabytes for the operating system, give the JVM enough
# heap to hold all your transaction state and query context, and then leave the
# rest for the page cache.
# Java Heap Size: by default the Java heap size is dynamically calculated based
# on available system resources. Uncomment these lines to set specific initial
# and maximum heap size.
server.memory.heap.initial_size={{ neo4j_heap_initial_size }}
server.memory.heap.max_size={{ neo4j_heap_max_size }}
# The amount of memory to use for mapping the store files.
# The default page cache memory assumes the machine is dedicated to running
# Neo4j, and is heuristically set to 50% of RAM minus the Java heap size.
server.memory.pagecache.size={{ neo4j_pagecache_size }}
# Limit the amount of memory that all of the running transaction can consume.
# The default value is 70% of the heap size limit.
dbms.memory.transaction.total.max={{ neo4j_memory_transaction_total_max }}
# Limit the amount of memory that a single transaction can consume.
# By default there is no limit.
#db.memory.transaction.max=16m
#*****************************************************************
# Network connector configuration
#*****************************************************************
# With default configuration Neo4j only accepts local connections.
# To accept non-local connections, uncomment this line:
server.default_listen_address={{ neo4j_default_listen_address }}
server.default_advertised_address={{ neo4j_default_advertised_address }}
# Bolt connector
server.bolt.enabled=true
server.bolt.tls_level=DISABLED
server.bolt.listen_address=:7687
server.bolt.advertised_address=:7687
# HTTP Connector. There can be zero or one HTTP connectors.
server.http.enabled=true
server.http.listen_address=:7474
server.http.advertised_address=:7474
# HTTPS Connector. There can be zero or one HTTPS connectors.
server.https.enabled=false
#*****************************************************************
# Logging configuration
#*****************************************************************
# To enable HTTP logging, uncomment this line
dbms.logs.http.enabled=true
# To enable GC Logging, uncomment this line
#server.logs.gc.enabled=true
# GC Logging Options
# see https://docs.oracle.com/en/java/javase/11/tools/java.html#GUID-BE93ABDC-999C-4CB5-A88B-1994AAAC74D5
#server.logs.gc.options=-Xlog:gc*,safepoint,age*=trace
# Number of GC logs to keep.
#server.logs.gc.rotation.keep_number=5
# Size of each GC log that is kept.
#server.logs.gc.rotation.size=20m
#*****************************************************************
# Miscellaneous configuration
#*****************************************************************
# Retention policy for transaction logs needed to perform recovery and backups.
db.tx_log.rotation.retention_policy=2 days 2G
#********************************************************************
# JVM Parameters
#********************************************************************
# G1GC generally strikes a good balance between throughput and tail
# latency, without too much tuning.
server.jvm.additional=-XX:+UseG1GC
# Have common exceptions keep producing stack traces, so they can be
# debugged regardless of how often logs are rotated.
server.jvm.additional=-XX:-OmitStackTraceInFastThrow
# Make sure that `initmemory` is not only allocated, but committed to
# the process, before starting the database. This reduces memory
# fragmentation, increasing the effectiveness of transparent huge
# pages. It also reduces the possibility of seeing performance drop
# due to heap-growing GC events, where a decrease in available page
# cache leads to an increase in mean IO response time.
# Try reducing the heap memory, if this flag degrades performance.
server.jvm.additional=-XX:+AlwaysPreTouch
# Trust that non-static final fields are really final.
# This allows more optimizations and improves overall performance.
# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or
# serialization to change the value of final fields!
server.jvm.additional=-XX:+UnlockExperimentalVMOptions
server.jvm.additional=-XX:+TrustFinalNonStaticFields
# Disable explicit garbage collection, which is occasionally invoked by the JDK itself.
server.jvm.additional=-XX:+DisableExplicitGC
# Restrict size of cached JDK buffers to 1 KB
server.jvm.additional=-Djdk.nio.maxCachedBufferSize=1024
# More efficient buffer allocation in Netty by allowing direct no cleaner buffers.
server.jvm.additional=-Dio.netty.tryReflectionSetAccessible=true
# Exits the JVM on the first occurrence of an out-of-memory error. It's preferable to restart the VM in case of out-of-memory errors.
# server.jvm.additional=-XX:+ExitOnOutOfMemoryError
# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes.
# This is to protect the server from any potential passive eavesdropping.
server.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048
# This mitigates a DDoS vector.
server.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true
# Enable remote debugging
#server.jvm.additional=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
# This filter prevents deserialization of arbitrary objects via java object serialization, addressing potential vulnerabilities.
# By default this filter whitelists all neo4j classes, as well as classes from the hazelcast library and the java standard library.
# These defaults should only be modified by expert users!
# For more details (including filter syntax) see: https://openjdk.java.net/jeps/290
#server.jvm.additional=-Djdk.serialFilter=java.**;org.neo4j.**;com.neo4j.**;com.hazelcast.**;net.sf.ehcache.Element;com.sun.proxy.*;org.openjdk.jmh.**;!*
# Increase the default flight recorder stack sampling depth from 64 to 256, to avoid truncating frames when profiling.
server.jvm.additional=-XX:FlightRecorderOptions=stackdepth=256
# Allow profilers to sample between safepoints. Without this, sampling profilers may produce less accurate results.
server.jvm.additional=-XX:+UnlockDiagnosticVMOptions
server.jvm.additional=-XX:+DebugNonSafepoints
# Open modules for neo4j to allow internal access
server.jvm.additional=--add-opens=java.base/java.nio=ALL-UNNAMED
server.jvm.additional=--add-opens=java.base/java.io=ALL-UNNAMED
server.jvm.additional=--add-opens=java.base/sun.nio.ch=ALL-UNNAMED
# Enable access to JDK vector API
# server.jvm.additional=--add-modules=jdk.incubator.vector
# Disable logging JMX endpoint.
server.jvm.additional=-Dlog4j2.disable.jmx=true
# Limit JVM metaspace and code cache to allow garbage collection. Used by cypher for code generation and may grow indefinitely unless constrained.
# Useful for memory constrained environments
#server.jvm.additional=-XX:MaxMetaspaceSize=1024m
#server.jvm.additional=-XX:ReservedCodeCacheSize=512m
# Allow big methods to be JIT compiled.
# Useful for big queries and big expressions where cypher code generation can create large methods.
#server.jvm.additional=-XX:-DontCompileHugeMethods
#********************************************************************
# Other Neo4j system properties
#********************************************************************
browser.remote_content_hostname_whitelist="localhost, guides.neo4j.com"
dbms.security.http_access_control_allow_origin="*"
dbms.security.procedures.allowlist=gds.*,apoc.*
dbms.security.procedures.unrestricted=gds.*,apoc.*
db.lock.acquisition.timeout=10s

View File

@@ -0,0 +1,278 @@
<?php
// Invalidate a single cached script, then bounce back to the scripts list.
if (isset($_GET['invalidate'])) {
    opcache_invalidate($_GET['invalidate'], true);
    header('Location: ' . $_SERVER['PHP_SELF'].'#scripts');
    exit; // bug fix: without exit the whole dashboard was still rendered after the redirect header
}
// Flush the whole opcode cache, disable client caching of the response.
if (isset($_GET['reset'])) {
    opcache_reset();
    header('Cache-Control: no-store, no-cache, must-revalidate, max-age=0');
    header('Cache-Control: post-check=0, pre-check=0', false);
    header('Pragma: no-cache');
    header('Location: ' . $_SERVER['PHP_SELF'].'#scripts');
    exit; // same: headers are set, no need to build the page body
}
/**
* Fetch configuration and status information from OpCache
*/
$config = opcache_get_configuration();
$status = opcache_get_status();
/**
* Turn bytes into a human readable format
* @param $bytes
*/
function size_for_humans($bytes)
{
    // Render a byte count with the largest unit it exceeds;
    // thresholds are strict (values of exactly 1024 / 1048576 use the smaller unit).
    if ($bytes <= 1024) {
        return sprintf("%d&nbsp;bytes", $bytes);
    }
    if ($bytes <= 1048576) {
        return sprintf("%.2f&nbsp;kB", $bytes / 1024);
    }
    return sprintf("%.2f&nbsp;MB", $bytes / 1048576);
}
/**
 * Length of the common prefix of $a and $b.
 *
 * Bug fix: curly-brace string offsets ($a{$i}) were deprecated in PHP 7.4 and
 * removed in PHP 8 — replaced with square brackets. Also bounds-check $b to
 * avoid "uninitialized string offset" warnings when $b is the shorter string.
 */
function getOffsetWhereStringsAreEqual($a, $b)
{
    $i = 0;
    while ($i < strlen($a) && $i < strlen($b) && $a[$i] === $b[$i]) {
        $i++;
    }
    return $i;
}
/**
 * Advisory message for known status/config properties, '' when nothing to flag.
 *
 * Fixes: grammar in user-facing messages ("should enabled" -> "should enable",
 * "should disabled it" -> "should disable it"); removed unreachable break
 * statements after return.
 */
function getSuggestionMessage($property, $value)
{
    switch ($property) {
        case 'opcache_enabled':
            return $value ? '' : '<span class="glyphicon glyphicon-search"></span> You should enable opcache';
        case 'cache_full':
            return $value ? '<span class="glyphicon glyphicon-search"></span> You should increase opcache.memory_consumption' : '';
        case 'opcache.validate_timestamps':
            return $value ? '<span class="glyphicon glyphicon-search"></span> If you are in a production environment you should disable it' : '';
    }
    return '';
}
/**
 * Render a raw status/config value as display text, formatting the
 * well-known memory and percentage properties.
 */
function getStringFromPropertyAndValue($property, $value)
{
    if (is_bool($value)) {
        return $value ? 'true' : 'false';
    }
    $memoryProperties = array('used_memory', 'free_memory', 'wasted_memory', 'opcache.memory_consumption');
    if (in_array($property, $memoryProperties, true)) {
        return size_for_humans($value);
    }
    $percentProperties = array('current_wasted_percentage', 'opcache_hit_rate');
    if (in_array($property, $percentProperties, true)) {
        return number_format($value, 2).'%';
    }
    if ($property === 'blacklist_miss_ratio') {
        return number_format($value, 2);
    }
    return $value;
}
?>
<!DOCTYPE html>
<html>
<head>
<title>OPcache Dashboard - Carlos Buenosvinos (@buenosvinos)</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="shortcut icon" href="//www.php.net/favicon.ico">
<link href="//netdna.bootstrapcdn.com/bootstrap/3.0.2/css/bootstrap.min.css" rel="stylesheet">
<style>
body { padding-top: 70px; }
h2 {
padding-top: 100px;
margin-top: -100px;
display: inline-block; /* required for webkit browsers */
}
</style>
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="//oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="//oss.maxcdn.com/libs/respond.js/1.3.0/respond.min.js"></script>
<![endif]-->
</head>
<body data-spy="scroll" data-target="#navbar-opcache">
<a href="https://github.com/carlosbuenosvinos/opcache-dashboard"><img style="position: absolute; top: 50px; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_gray_6d6d6d.png" alt="Fork me on GitHub"></a>
<nav id="navbar-opcache" class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">Zend OPcache <?= $config['version']['version']?></a>
</div>
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav">
<li><a href="#hits">Hits</a></li>
<li><a href="#memory">Memory</a></li>
<li><a href="#keys">Keys</a></li>
<li><a href="#status">Status</a></li>
<li><a href="#configuration">Configuration</a></li>
<li><a href="#scripts">Scripts</a></li>
</ul>
</div>
</nav>
<div class="container">
<div class="jumbotron">
<h1>OPcache Dashboard</h1>
<h2>by Carlos Buenosvinos (<a href="https://twitter.com/buenosvinos">@buenosvinos</a>)</h2>
<p>PHP: <?= phpversion() ?> and OPcache: <?= $config['version']['version'] ?></p>
</div>
<?php
$stats = $status['opcache_statistics'];
$hitRate = round($stats['opcache_hit_rate'], 2);
?>
<h2 id="hits">Hits: <?= $hitRate ?>%</h2>
<div class="progress progress-striped">
<div class="progress-bar progress-bar-success" style="width: <?= $hitRate ?>%">
<span class="sr-only">Hits</span>
</div>
<div class="progress-bar progress-bar-danger" style="width: <?= (100 - $hitRate) ?>%">
<span class="sr-only">Misses</span>
</div>
</div>
<?php
$mem = $status['memory_usage'];
$totalMemory = $config['directives']['opcache.memory_consumption'];
$usedMemory = $mem['used_memory'];
$freeMemory = $mem['free_memory'];
$wastedMemory = $mem['wasted_memory'];
?>
<h2 id="memory">Memory: <?= size_for_humans($wastedMemory + $usedMemory) ?> of <?= size_for_humans($totalMemory) ?></h2>
<div class="progress progress-striped">
<div class="progress-bar progress-bar-danger" style="width: <?= round(($wastedMemory / $totalMemory) * 100, 0) ?>%">
<span class="sr-only">Wasted memory</span>
</div>
<div class="progress-bar progress-bar-warning" style="width: <?= round(($usedMemory / $totalMemory) * 100, 0) ?>%">
<span class="sr-only">Used memory</span>
</div>
<div class="progress-bar progress-bar-success" style="width: <?= round(($freeMemory / $totalMemory) * 100, 0) ?>%">
<span class="sr-only">Free memory</span>
</div>
</div>
<?php
$totalKeys = $stats['max_cached_keys'];
$usedKeys = $stats['num_cached_keys'];
$freeKeys = $totalKeys - $usedKeys;
?>
<h2 id="keys">Keys: <?= $usedKeys ?> of <?= $totalKeys ?></h2>
<div class="progress progress-striped">
<div class="progress-bar progress-bar-warning" style="width: <?= round(($usedKeys / $totalKeys) * 100, 0) ?>%">
<span class="sr-only">Used keys</span>
</div>
<div class="progress-bar progress-bar-success" style="width: <?= round(($freeKeys / $totalKeys) * 100, 0) ?>%">
<span class="sr-only">Free keys</span>
</div>
</div>
<h2 id="status">Status</h2>
<div class="table-responsive">
<table class="table table-striped table-hover">
<?php
foreach ($status as $key => $value) {
if ($key == 'scripts') {
continue;
}
if (is_array($value)) {
foreach ($value as $k => $v) {
$v = getStringFromPropertyAndValue($k, $v);
$m = getSuggestionMessage($k, $v);
?><tr class="<?= $m ? 'danger' : '' ?>"><th align="left"><?= $k ?></th><td align="right"><?= $v ?></td><td><?= $m ?></td></tr><?php
}
continue;
}
$mess = getSuggestionMessage($key, $value);
$value = getStringFromPropertyAndValue($key, $value);
?><tr class="<?= $mess ? 'danger' : '' ?>"><th align="left"><?= $key ?></th><td align="right"><?= $value ?></td><td><?= $mess ?></td></tr><?php
}
?>
</table>
</div>
<h2 id="configuration">Configuration</h2>
<div class="table-responsive">
<table class="table table-striped table-hover">
<?php foreach ($config['directives'] as $key => $value) {
$mess = getSuggestionMessage($key, $value);
?>
<tr class="<?= $mess ? 'danger' : '' ?>" >
<th align="left"><?= $key ?></th>
<td align="right"><?= getStringFromPropertyAndValue($key, $value) ?></td>
<td align="left"><?= $mess ?></td>
</tr>
<?php } ?>
</table>
</div>
<h2 id="scripts">Scripts (<?= count($status["scripts"]) ?>) <a type="button" class="btn btn-success" href="?reset">Reset all</a></h2>
<table class="table table-striped">
<tr>
<th>Options</th>
<th>Hits</th>
<th>Memory</th>
<th>Path</th>
</tr>
<?php
// Sort scripts by hits, descending. Bug fix: the original comparator returned a
// bool (never -1), which yields unreliable ordering and is deprecated in PHP 8;
// the spaceship operator returns the required -1/0/1.
uasort($status['scripts'], function ($a, $b) { return $b['hits'] <=> $a['hits']; });
$offset = null;
$previousKey = null;
foreach ($status['scripts'] as $key => $data) {
$offset = min(
getOffsetWhereStringsAreEqual(
(null === $previousKey) ? $key : $previousKey,
$key
),
(null === $offset) ? strlen($key) : $offset
);
$previousKey = $key;
}
foreach ($status['scripts'] as $key => $data) {
?>
<tr>
<td><a href="?invalidate=<?= $data['full_path'] ?>">Invalidate</a></td>
<td><?= $data['hits'] ?></td>
<td><?= size_for_humans($data['memory_consumption']) ?></td>
<td><?= substr($data['full_path'], $offset - 1) ?></td>
</tr>
<?php } ?>
</table>
</div>
<script src="//code.jquery.com/jquery.js"></script>
<script src="//netdna.bootstrapcdn.com/bootstrap/3.0.2/js/bootstrap.min.js"></script>
</body>
</html>

View File

@@ -0,0 +1,49 @@
---
- name: Copy opcache script to server
ansible.builtin.copy:
src: opcache.php
dest: /data/services/web/default/opcache.php
owner: www-data
group: www-data
mode: "0644"
- name: Remove opcache tuning in www.conf pool
ansible.builtin.lineinfile:
path: "/etc/php/{{ php_version }}/fpm/pool.d/www.conf"
state: absent
line: "{{ item }}"
with_items:
- "php_value[opcache.max_accelerated_files] = 10000"
- "php_value[opcache.memory_consumption] = 128"
- name: On sort le serveur du pool
ansible.builtin.file:
path: /data/services/web/default/check/.online
state: absent
- name: Sleep for 6 seconds and continue with play
ansible.builtin.wait_for:
timeout: 6
delegate_to: localhost
- name: Restart php-fpm and apache
ansible.builtin.systemd:
name: "{{ item }}"
state: restarted
with_items:
- apache2
- "{{ 'php' + php_version + '-fpm' }}"
- name: On remet le serveur dans le pool
ansible.builtin.file:
path: /data/services/web/default/check/.online
state: touch
mode: "0777"
owner: "{{ user }}"
group: "{{ user }}"
- name: Sleep for 3 seconds and continue with play
ansible.builtin.wait_for:
timeout: 3
delegate_to: localhost

View File

@@ -0,0 +1,49 @@
# Installation et configuration de Postgresql
[TOC]
## Documentation
### Postgresql :
* [Postgresql Official Documentation](https://www.postgresql.org/docs/) (EN)
* [Postgresql Documentation Officielle](https://docs.postgresql.fr/) (FR)
### Modules Ansible :
* [Ansible Galaxy : Postgresql](https://galaxy.ansible.com/ui/repo/published/community/postgresql/) (EN)
## Configuration
### Variables
* postgresql_monitoring_(user/password) : Identifiants liés à la supervision et à pmm.
* postgresql_admin_role_attr_flags : Liste des rôles qu'on souhaite attribuer aux administrateurs. Défaut : CREATEDB,CREATEROLE,NOSUPERUSER.
* postgresql_pmm_server : Adresse du serveur PMM.
* postgresql_pmm_server_(username/password) : Identifiants utilisés pour se connecter au serveur PMM.
* postgresql_pmm_client_(username/password) : Identifiants utilisés pour se connecter au serveur Postgresql local.
* postgresql_users_networks : Liste des réseaux qui seront ajoutés au fichier pg_hba.conf pour les utilisateurs.
* postgresql_databases : Liste des bases de données à créer.
```
postgresql_databases:
- name: "testsla"
(optional) owner: "testsla"
schemas:
- name: "testsla_schema"
      owner: "testsla"
```
### Tags
* install : Installe Postgresql et ses dépendances.
* config : Gère les configurations, crée les utilisateurs et effectue les tâches liées au monitoring.
* backup : Installe les composants nécessaires aux sauvegardes.
* monitoring : Installe et configure pmm-client.
* databases : Crée les bases de données et leurs schémas.
## Fonctionnement du rôle
### Basique pour comprendre Postgresql
#### Rôles, utilisateurs et permissions
* Un utilisateur correspond à un rôle avec un droit de login.
* Tous les utilisateurs font partie du groupe "public".
#### Base de données, schémas et tables.
* Une base de données contient un ou plusieurs schémas qui contiennent une ou plusieurs tables.
* Les bases de données contiennent par défaut un schema "public" sur lequel le groupe "public" a les droits de lecture.

View File

@@ -0,0 +1,21 @@
---
postgresql_default_data_dir: "/var/lib/postgresql/17/main"
# Vault paths are built with '~' concatenation: a nested "{{ env }}" (or other
# variable) inside a quoted lookup term is NOT expanded by Jinja — moustaches
# don't nest — and would be sent to Vault verbatim.
postgresql_pmm_client_username: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/' ~ env ~ '/' ~ postgresql_monitoring_user ~ ':pmm_client_username') }}"
postgresql_pmm_client_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/' ~ env ~ '/' ~ postgresql_monitoring_user ~ ':pmm_client_password') }}"
postgresql_pmm_server: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server') }}"
postgresql_pmm_server_username: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server_username') }}"
postgresql_pmm_server_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server_password') }}"
postgresql_monitoring_user: "monitoring"
postgresql_monitoring_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/monitoring:password') }}"
postgresql_backup_user: "backup"
postgresql_backup_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/backup:password') }}"
postgresql_replication_user: "replica"
postgresql_replication_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/replica:password') }}"
postgresql_admin_role_attr_flags: "CREATEDB,CREATEROLE,NOSUPERUSER"
postgresql_cmk_url: ""

View File

@@ -0,0 +1,17 @@
# systemd unit deployed by the postgresql Ansible role.
[Unit]
Description=PostgreSQL database server
Documentation=man:postgres(1)
After=network-online.target
Wants=network-online.target
[Service]
# NOTE(review): Type=notify expects the main process to call sd_notify();
# pg_ctl forks postgres and exits -- confirm the unit actually reaches
# "active" state with this combination.
Type=notify
User=postgres
# NOTE(review): major version 16 is hard-coded here while the role default
# data dir (defaults/main.yml) points at .../17/main -- confirm which
# major version is intended.
ExecStart=/usr/lib/postgresql/16/bin/pg_ctl start -D /etc/postgresql/16/main/
ExecReload=/bin/kill -HUP $MAINPID
# mixed: SIGINT to the main process, SIGKILL to stragglers on timeout.
KillMode=mixed
# SIGINT = PostgreSQL "fast shutdown" mode.
KillSignal=SIGINT
# Never time out a shutdown: let PostgreSQL finish flushing/recovery.
TimeoutSec=infinity
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
---
# Handlers for the postgresql role. Handler names are notify targets
# (see tasks/install.yml) and must not be renamed.
- name: Daemon_reload
  # Reload systemd unit files after the service file is (re)deployed.
  ansible.builtin.systemd_service:
    daemon_reload: true
- name: Restart Postgres
  ansible.builtin.systemd_service:
    name: postgresql.service
    state: restarted

View File

@@ -0,0 +1,90 @@
---
# Backup setup: dedicated PostgreSQL user, NFS mount, dump script and cron.
- name: Install dependencies
  ansible.builtin.apt:
    # Installing as a list resolves everything in one apt transaction
    # (faster and atomic compared to a with_items loop).
    name:
      - gnupg2
      - lsb-release
      - nfs-common
    state: present
  tags: install,config,backup
- name: Setting up pg_hba conf for backup user
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    # Use the role variable so this entry always matches the user
    # created below (default value is "backup").
    users: "{{ postgresql_backup_user }}"
    source: "127.0.0.1"
    databases: all
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  tags: install,config,backup
- name: Creating backup user
  community.postgresql.postgresql_user:
    name: "{{ postgresql_backup_user }}"
    password: "{{ postgresql_backup_password }}"
  become: true
  become_user: postgres
  tags: install,config,backup
- name: Ensure needed directory exists
  ansible.builtin.file:
    path: "{{ item }}"
    mode: "0755"
    owner: root
    group: root
    state: directory
  loop:
    - "/data/scripts/"
    - "/nas/"
  tags: install,config,backup
- name: Setting up mount point for nas
  ansible.posix.mount:
    path: "/nas"
    src: "{{ postgresql_nfs_server }}:/data/shares/postgresql"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    state: mounted
  tags: install,config,backup
- name: Deploying pgpass for backup user
  ansible.builtin.template:
    src: "pgpass-backup.j2"
    dest: "/root/.pgpass"
    owner: root
    group: root
    # libpq ignores a .pgpass readable by group/others.
    mode: "0600"
  tags: install,config,backup
- name: Deploying backup script
  ansible.builtin.template:
    src: "postgresql-dump-full.sh.j2"
    dest: "/data/scripts/postgresql-dump-full.sh"
    owner: root
    group: root
    mode: "0700"
  tags: install,config,backup
- name: Setting up cron for backup
  # The cron entry is deployed disabled; enable it per host once the
  # NAS share is in place.
  ansible.builtin.cron:
    name: "postgresql backup"
    minute: "0"
    hour: "14"
    job: "/data/scripts/postgresql-dump-full.sh -r 10 -d /nas -c"
    user: root
    cron_file: postgresql-backup
    state: present
    disabled: true
  tags: install,config,backup
- name: Adding line to mrpe.cfg
  # The line is deployed commented out (leading '#'); uncomment on the
  # host to activate the checkmk batch check for the dump script.
  ansible.builtin.lineinfile:
    path: "/etc/check_mk/mrpe.cfg"
    regexp: "^#postgresql_dump"
    line: "#postgresql_dump /usr/local/nagios/plugins/check_batch postgresql-dump-full.sh 129600"
  tags: install,config,backup

View File

@@ -0,0 +1,33 @@
---
# Create databases and their schemas, then grant USAGE on each schema
# to the built-in "public" role.
- name: Create databases
  community.postgresql.postgresql_db:
    name: "{{ item.name }}"
    owner: "{{ item.owner | default('postgres') }}"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases }}"
  tags: databases
- name: Create schemas in databases
  # subelements yields [database, schema] pairs: item.0 is the database
  # entry, item.1 the schema entry.
  community.postgresql.postgresql_schema:
    name: "{{ item.1.name }}"
    db: "{{ item.0.name }}"
    owner: "{{ item.1.owner | default('postgres') }}"
    # Bugfix: the optional comment lives on the schema entry (item.1);
    # "item.comment" was always undefined on the subelements tuple.
    comment: "{{ item.1.comment | default('') }}"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases | subelements('schemas') }}"
  tags: databases
- name: Grant usage on new schemas to public role
  community.postgresql.postgresql_privs:
    database: "{{ item.0.name }}"
    objs: "{{ item.1.name }}"
    type: "schema"
    privs: "USAGE"
    role: "public"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases | subelements('schemas') }}"
  tags: databases

View File

@@ -0,0 +1,125 @@
---
# Install PostgreSQL from the PGDG repository, deploy base configuration
# and set up pg_hba entries plus the replication user.
- name: Install requirements
  ansible.builtin.apt:
    # Installing as a list resolves everything in one apt transaction.
    name:
      - gnupg
      - curl
      - apt-transport-https
      - debian-keyring
      - python3-psycopg2
    state: present
  tags: install,conf
- name: Import postgres key
  ansible.builtin.get_url:
    url: "https://www.postgresql.org/media/keys/ACCC4CF8.asc"
    dest: "/usr/share/keyrings/postgres.ACCC4CF8.asc"
    mode: "0644"
    force: true
  tags: install
- name: Add Postgres repository
  ansible.builtin.apt_repository:
    filename: postgres
    # Derive the suite from the host instead of hard-coding "bookworm",
    # so the role also works on other Debian releases.
    repo: "deb [signed-by=/usr/share/keyrings/postgres.ACCC4CF8.asc] https://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main"
  tags: install,conf
- name: Install Postgresql
  ansible.builtin.apt:
    name:
      - postgresql
      - postgresql-client
      - libpq-dev
    state: present
  tags: install,conf
- name: Holding postgres packages
  # Pin the packages so unattended upgrades cannot move the major version.
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: hold
  loop:
    - postgresql
    - postgresql-client
    - libpq-dev
    - python3-psycopg2
  tags: install,conf
- name: Deploy systemd service file
  ansible.builtin.copy:
    src: postgresql.service
    dest: "/lib/systemd/system/postgresql.service"
    mode: "0644"
    owner: root
    group: root
  tags: install
  notify:
    - Daemon_reload
    - Restart Postgres
- name: Deploy Postgresql config file
  ansible.builtin.copy:
    src: "postgresql.conf"
    dest: "/etc/postgresql/16/main/postgresql.conf"
    owner: postgres
    group: postgres
    mode: "0644"
  tags: install,conf
  notify: Restart Postgres
- name: Enable and start postgres service
  ansible.builtin.systemd_service:
    name: postgresql.service
    state: started
    enabled: true
  # Tagged so the service is also started on tag-filtered runs
  # (an untagged task is skipped when --tags is used).
  tags: install,conf
- name: Setting up pg_hba conf for postgres
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: local
    databases: all
    users: postgres
    method: peer
    create: true
  become: true
  become_user: postgres
  tags: install
- name: Setting up pg_hba conf for replica
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    databases: replication
    source: "{{ item }}"
    # Use the role variable so the entry always matches the user
    # created below (default value is "replica").
    users: "{{ postgresql_replication_user }}"
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  loop: "{{ postgresql_replication_networks }}"
  tags: install
- name: Creating replica users
  community.postgresql.postgresql_user:
    name: "{{ postgresql_replication_user }}"
    password: "{{ postgresql_replication_password }}"
    role_attr_flags: "REPLICATION"
  become: true
  become_user: postgres
  tags: install
- name: Setting up pg_hba conf for ILG/APP users
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    users: all
    source: "{{ item }}"
    databases: all
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  loop: "{{ postgresql_users_networks }}"
  tags: install

View File

@@ -0,0 +1,17 @@
---
# Entry point of the postgresql role: dispatch to the task files.
# With include_tasks, tags on the include select whether the file is
# included at all; the tasks inside carry their own matching tags, so
# tag-filtered runs (e.g. --tags backup) still select the inner tasks.
- name: Import install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install
- name: Import backup related tasks
  ansible.builtin.include_tasks: backup.yml
  tags: config,backup
- name: Import monitoring tasks
  ansible.builtin.include_tasks: monitoring.yml
  tags: config,monitoring,pmm_register
- name: Import database related tasks
  ansible.builtin.include_tasks: databases.yml
  tags: databases

View File

@@ -0,0 +1,77 @@
---
- name: Setting up pg_hba conf for monitoring users
community.postgresql.postgresql_pg_hba:
dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
contype: host
source: 127.0.0.1
users: monitoring
databases: all
method: scram-sha-256
create: true
become: true
become_user: postgres
tags: config,monitoring
- name: Creating monitoring user
community.postgresql.postgresql_user:
name: "{{ postgresql_monitoring_user }}"
password: "{{ postgresql_monitoring_password }}"
become: true
become_user: postgres
tags: config,monitoring
- name: Granting privileges to monitoring user
community.postgresql.postgresql_privs:
database: postgres
type: group
roles: "{{ postgresql_monitoring_user }}"
objs: "pg_monitor"
state: present
become: true
become_user: postgres
tags: config,monitoring
- name: Deploying checkmk config file
ansible.builtin.template:
src: "postgres.cfg.j2"
dest: "/etc/check_mk/postgres.cfg"
owner: root
group: root
mode: "0644"
tags: config,monitoring
- name: Deploying checkmk mk_postgres.py
ansible.builtin.get_url:
url: "https://{{ postgres_cmk_url }}/check_mk/agents/plugins/mk_postgres.py"
dest: "/usr/lib/check_mk_agent/plugins/mk_postgres.py"
owner: root
group: root
mode: "0755"
tags: config,monitoring
- name: Installing percona tools repo
ansible.builtin.apt:
deb: https://repo.percona.com/apt/percona-release_latest.{{ ansible_distribution_release }}_all.deb
tags: config,monitoring
- name: Installation pmm2-client
ansible.builtin.apt:
update_cache: true
pkg: pmm2-client
state: present
tags: config,monitoring
- name: Register on pmm server
ansible.builtin.command:
cmd: pmm-admin config --server-insecure-tls --server-url=https://{{ postgresql_pmm_server_username }}:{{ postgresql_pmm_server_password }}@{{ postgresql_pmm_server }}:443
register: register_server
changed_when: register_server.rc != 0
tags: pmm_register
- name: Adding Postgresql to pmm
ansible.builtin.command:
cmd: pmm-admin add postgresql --username={{ postgresql_pmm_client_username }} --password={{ postgresql_pmm_client_password }}
register: add_server
changed_when: add_server.rc != 0
tags: pmm_register

View File

@@ -0,0 +1 @@
localhost:5432:*:{{ postgresql_backup_user }}:{{ postgresql_backup_password }}

View File

@@ -0,0 +1 @@
INSTANCE=127.0.0.1:{{ postgresql_monitoring_user }}:{{ postgresql_monitoring_password }}

View File

@@ -0,0 +1,92 @@
#!/bin/bash
# Full PostgreSQL dump script (Jinja2 template rendered by Ansible).
# Dumps every database except template0 into ${BACKUPDIR}/postgresqldump/,
# prunes dumps older than the retention, and writes a status file read by
# the checkmk "check_batch" plugin.
# Usage: postgresql-dump-full.sh -r <retention_days> -d <backup_root> [-c]
set -eu
DATE=$(date '+%Y%m%d %H%M%S')
TODAY=$(date '+%Y%m%d')
HOSTNAME=$(hostname -s)
STATUS=0
# Bugfix: OUTPUT is appended to on the failure path; under `set -u` it must
# be initialized or the script aborts with "unbound variable".
OUTPUT=""
LOGFILE="/data/log/scripts/postgresql-dump-databases.log"
PGSQL_HOST="localhost"
PGSQL_USER="{{ postgresql_backup_user }}"
COMPRESS=false
touch ${LOGFILE}
#
# Functions
#
# Abort when the NAS share is not mounted (checks a marker file).
# NOTE(review): defined but never invoked; consider calling it after option
# parsing once a ${BACKUPDIR}/.mount marker exists on the share.
checkNas()
{
    if [ ! -e "${BACKUPDIR}/.mount" ]; then
        echo "${BACKUPDIR} not mounted. Backup aborted." | tee -a ${LOGFILE}
        exit 1
    fi
}
# Print command-line help.
usage()
{
    echo "$0 -r <retention> -d <repertoire> -c (compression)"
    echo "Exemple : /data/scripts/postgresql-dump-full.sh -r 20 -d /nas -c"
}
#
# Main
#
while getopts "hcr:d:" option
do
    case "${option}" in
        r)
            RETENTION=${OPTARG};;
        d)
            BACKUPDIR=${OPTARG};;
        c)
            COMPRESS=true;;
        h | *)
            usage
            exit 1;;
    esac
done
# -r and -d are mandatory; without this guard, `set -u` would abort later
# with a cryptic "unbound variable" error.
if [ -z "${RETENTION:-}" ] || [ -z "${BACKUPDIR:-}" ]; then
    usage
    exit 1
fi
echo "Lancement du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}
mkdir -p "$BACKUPDIR"/postgresqldump/ | tee -a ${LOGFILE}
# Prune dumps older than the retention (whole days, midnight-aligned).
find "$BACKUPDIR"/postgresqldump/ -mindepth 1 -maxdepth 1 -type f -daystart -mtime +"${RETENTION}" -delete | tee -a ${LOGFILE}
# Does not work when the backup runs on a replica
#echo "[${DATE}] - Granting superuser to ${PGSQL_USER} user" | tee -a ${LOGFILE}
#/usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH SUPERUSER'" | tee -a ${LOGFILE}
DB_LIST=$(/usr/bin/su - postgres -c "psql --csv --command 'select datname from pg_catalog.pg_database'")
for db in ${DB_LIST} ; do
    # Skip the CSV header line ("datname") and the template0 database.
    if [ ! "$db" = "datname" ] && [ ! "$db" = "template0" ] ; then
        echo "[${DATE}] - Dumping database : $db" | tee -a ${LOGFILE}
        if [ $COMPRESS = true ] ; then
            /usr/bin/pg_dump -Z gzip -f "${BACKUPDIR}"/postgresqldump/"${TODAY}"-"${HOSTNAME}"-"${db}".gz -U "${PGSQL_USER}" -w -h "${PGSQL_HOST}" "${db}" | tee -a ${LOGFILE}
            # PIPESTATUS[0]: pg_dump's own exit code, not tee's.
            STATUS=${PIPESTATUS[0]}
        else
            /usr/bin/pg_dump -f "${BACKUPDIR}"/postgresqldump/"${TODAY}"-"${HOSTNAME}"-"${db}".sql -U "${PGSQL_USER}" -w -h "${PGSQL_HOST}" "${db}" | tee -a "${LOGFILE}"
            STATUS=${PIPESTATUS[0]}
        fi
        if [ ! ${STATUS} -eq 0 ]; then
            echo "[${DATE}][CRIT] Dump of $db failed" | tee -a ${LOGFILE}
            echo "[${DATE}] - Revoking superuser from ${PGSQL_USER} user" | tee -a ${LOGFILE}
            /usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH NOSUPERUSER'" | tee -a ${LOGFILE}
            OUTPUT="${OUTPUT} $db"
            break
        fi
    fi
done
# Does not work when the backup runs on a replica
#echo "[${DATE}] - Revoking superuser from ${PGSQL_USER} user" | tee -a ${LOGFILE}
#/usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH NOSUPERUSER'" | tee -a ${LOGFILE}
# Bugfix: do NOT reset STATUS here. The previous `STATUS=$?` captured the
# for-loop's exit status (always 0), so checkmk always saw a success even
# when a dump had failed.
# output in statusfile for checkmk
echo "$(date +%s)|${STATUS}|Check log file ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
echo "Fin du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}

View File

@@ -0,0 +1,823 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '{{ postgresql_default_data_dir }}' # use data in another directory
# (change requires restart)
#hba_file = '/etc/postgresql/16/main/pg_hba.conf' # host-based authentication file
hba_file = '{{ postgresql_default_data_dir }}/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '{{ postgresql_default_data_dir }}/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/16-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = {{ postgresql_max_connections | default("100") }} # (change requires restart)
#reserved_connections = 0 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#scram_iterations = 4096
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
#gss_accept_delegation = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
#vacuum_buffer_usage_limit = 256kB # size of vacuum and analyze buffer access strategy ring;
# 0 to disable vacuum buffer access strategy;
# range 128kB to 16GB
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a WAL file
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a WAL file
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a WAL file switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived WAL file
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_presorted_aggregate = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Paris'
# - Process Title -
cluster_name = '16/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache # cache, none, snapshot
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
#createrole_self_grant = '' # set and/or inherit
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Paris'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
#icu_validation_level = warning # report ICU locale validation
# errors at the given level
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -0,0 +1,106 @@
# Installation et configuration de RabbitMQ
## Documentation
### RabbitMQ :
* [RabbitMQ Production Checklist](https://www.rabbitmq.com/production-checklist.html)
* [RabbitMQ Prometheus exporter](https://www.rabbitmq.com/prometheus.html)
* [RabbitMQ Config file exemple complet](https://github.com/rabbitmq/rabbitmq-server/blob/v3.12.x/deps/rabbit/docs/rabbitmq.conf.example)
* [RabbitMQ Authorisation and access control](https://rabbitmq.com/access-control.html)
* [RabbitMQctl](https://www.rabbitmq.com/rabbitmqctl.8.html)
### Modules ansible-galaxy :
* [Ansible Galaxy : Rabbitmq](https://galaxy.ansible.com/ui/repo/published/community/rabbitmq/)
## Configuration
Les modifications de configuration sont à faire dans le fichier [templates/rabbitmq.conf.j2](templates/rabbitmq.conf.j2)
## Variables
* rabbitmq_cluster_name : Nom du cluster rabbitmq. (Default : default)
* rabbitmq_cluster_nodes : Liste des noeuds appartenant au cluster.
* rabbitmq_admin_username : Nom de l'utilisateur admin. (Default : admin)
* rabbitmq_admin_password : Mot de passe de l'utilisateur admin.
* rabbitmq_plugins: Liste des plugins Rabbitmq à installer. (Default : rabbitmq_management,rabbitmq_shovel,rabbitmq_prometheus)
* rabbitmq_vhosts : Liste des vhosts. (Default : "/")
* rabbitmq_app_users : Liste des utilisateurs applicatifs à créer. Par défaut les utilisateurs ont tous les privilèges sur le vhost.
```
rabbitmq_app_users:
- username: "consult"
password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/rabbitmq/{{ env }}/users/consult:password') }}"
vhost: "consult"
```
## Fonctionnalités
* Installe les dépendances du rôle, rabbitmq et erlang.
* Supprime l'utilisateur guest créé par défaut et crée un utilisateur admin.
* Active les plugins de management (interface web), prometheus (exporter intégré) et shovel.
* Déploie les utilisateurs et les vhosts applicatifs.
## Tags
* install : installe rabbitmq et ses dépendances.
* config : supprime l'utilisateur guest, crée l'utilisateur admin, les vhosts et les utilisateurs applicatifs.
* users : déploie les utilisateurs et les vhosts.
* vhosts : déploie les vhosts.
* exchanges : déploie les exchanges.
## Premier lancement pour création d'un cluster
1. Lancer le playbook avec le tag install :
```
ansible-playbook -l rabbitmq_cluster playbooks/rabbitmq.yml -t install
```
2. Se rendre sur les machines 2 et 3 et renseigner les commandes suivantes pour créer le cluster :
```
rabbitmqctl stop_app
rabbitmqctl --longnames join_cluster node1.example.net
rabbitmqctl start_app
```
3. Lancer le playbook avec le tag config :
```
ansible-playbook -l rabbitmq_cluster playbooks/rabbitmq.yml -t config
```
## Modification de configuration
* Création de toutes les ressources : users, vhost, exchange, queues et bindings :
```
ansible-playbook playbooks/rabbitmq.yml -t config -l rabbitmq_cluster
```
* Déploiement des utilisateurs applicatifs et des vhosts :
```
ansible-playbook playbooks/rabbitmq.yml -t users -l rabbitmq_cluster
```
## Tests de performance
[RabbitMQ perf-tests](https://github.com/rabbitmq/rabbitmq-perf-test)
### Pré-requis
* Installer Java
* Créer un utilisateur avec tous les droits sur un vhost dédiés.
```
rabbitmqctl add_vhost testsla
rabbitmqctl add_user test_sla sebisdown -p testsla
rabbitmqctl set_permissions -p testsla test_sla ".*" ".*" ".*"
```
### Exemple de test
* Test sur une quorum-queue nommée 'qq', avec des messages de 4Ko publiés par 5 process et consommés par 15 process. Avec des taux variables : 200 msg/process/seconde pendant 240 secondes puis 400 msg/process/seconde pendant 120 secondes puis 300 msg/process/seconde pendant 120 secondes, en boucle.
```
java -jar perf-test-2.20.0.jar -h amqp://test_sla:sebisdown@rabbitmq-vip.example.com:5674/testsla --quorum-queue --queue qq --size 4000 --variable-rate 200:240 --variable-rate 400:120 --variable-rate 300:120 --producers 5 --consumers 15
```
* Test illimité avec un seul publieur et un consommateur.
```
java -jar perf-test-2.20.0.jar -h amqp://test_sla:sebisdown@rabbitmq-vip.example.com:5674/testsla
```
* Test illimité sur une quorum-queue avec un seul publieur et un consommateur.
```
java -jar perf-test-2.20.0.jar -h amqp://test_sla:sebisdown@rabbitmq-vip.example.com:5674/testsla --quorum-queue --queue qq
```
* Test illimité sur une quorum-queue avec un taux de 100 msg/secondes pour un seul publieur et un seul consommateur.
```
java -jar perf-test-2.20.0.jar -h amqp://test_sla:sebisdown@rabbitmq-vip.example.com:5674/testsla --quorum-queue --queue qq --rate 100
```

View File

@@ -0,0 +1,36 @@
---
# Default variables for the rabbitmq role.

# Name of the RabbitMQ cluster (rendered into rabbitmq.conf cluster_name).
rabbitmq_cluster_name: "default"
# Admin account created by the role; the password (rabbitmq_admin_password)
# must be provided by the inventory or a vault lookup.
rabbitmq_admin_username: "admin"
# Application users and vhosts to create. These are iterated with
# with_items, so the empty default must be a list, not a string
# (a bare "" would be looped over as one empty item).
rabbitmq_app_users: []
rabbitmq_vhosts: []
# Comma-separated list of plugins to enable.
rabbitmq_plugins: "rabbitmq_management,rabbitmq_shovel,rabbitmq_prometheus,rabbitmq_shovel_management"
# Statistics collection interval in milliseconds; raising it reduces load
# on the management database.
rabbitmq_collect_statistics_interval: 30000

# Example structures for the optional resource variables:
#
# rabbitmq_exchanges:
#   - name: "myapp.topic"
#     type: "topic"
#     vhost: "myapp"
#
# rabbitmq_queues:
#   - name: "myapp.queue"
#     vhost: "myapp"
#     dead_letter_exchange: ""
#     dead_letter_routing_key: "myapp.dlq"
#     arguments:
#       x-queue-type: quorum
#
# rabbitmq_bindings:
#   - name: "myapp.topic"
#     vhost: "myapp"
#     destination: "myapp.queue"
#     destination_type: "queue"
#     routing_key: "*"
#
# rabbitmq_app_users:
#   - username: "myapp"
#     password: "{{ lookup('community.hashi_vault.hashi_vault','ansible/data/rabbitmq/{{ env }}/users/myapp:password') }}"
#     vhost: "myapp"
#     read_priv: "^myapp.*"
#     write_priv: "^myapp.*"
#     configure_priv: "^$"

View File

@@ -0,0 +1,4 @@
{{ ansible_managed }}
[Service]
LimitNOFILE=65536

View File

@@ -0,0 +1,9 @@
{{ ansible_managed }}
/var/log/rabbitmq/*.log {
daily
missingok
rotate 7
compress
notifempty
}

View File

@@ -0,0 +1,10 @@
---
- name: Daemon_reload
ansible.builtin.systemd_service:
daemon_reload: true
- name: Restart Rabbitmq
ansible.builtin.systemd_service:
name: rabbitmq-server.service
state: restarted

View File

@@ -0,0 +1,117 @@
---
# RabbitMQ configuration tasks: config files, plugins, removal of the
# default guest user, then vhosts, users, exchanges, queues and bindings.
# Assumes rabbitmq-server is already installed (see install.yml).

- name: Deploy rabbitmq config file
  ansible.builtin.template:
    src: rabbitmq.conf.j2
    dest: /etc/rabbitmq/rabbitmq.conf
    mode: "0644"
    owner: rabbitmq
    group: rabbitmq
  notify: Restart Rabbitmq
  tags: config

- name: Deploy rabbitmq env file
  ansible.builtin.template:
    src: rabbitmq-env.conf.j2
    dest: /etc/rabbitmq/rabbitmq-env.conf
    mode: "0644"
    owner: rabbitmq
    group: rabbitmq
  notify: Restart Rabbitmq
  tags: config

# rabbitmq_plugins is a comma-separated string; the module accepts it as-is.
- name: Enabling Rabbitmq plugins
  community.rabbitmq.rabbitmq_plugin:
    name: "{{ rabbitmq_plugins }}"
    state: enabled
  tags: config

# The default 'guest' account is removed (production hardening).
- name: Delete guest user
  community.rabbitmq.rabbitmq_user:
    user: guest
    state: absent
  tags: config

- name: Create vhosts
  community.rabbitmq.rabbitmq_vhost:
    name: "{{ item }}"
    state: present
  with_items: "{{ rabbitmq_vhosts }}"
  tags: config,users,vhosts

# NOTE(review): the inner 'tags' is the rabbitmq_user MODULE parameter
# (RabbitMQ user tags, here 'administrator'); the outer 'tags' is the
# Ansible task tag. Confirm the intended indentation — the two must not
# end up at the same level (duplicate key).
- name: Create admin user
  community.rabbitmq.rabbitmq_user:
    user: "{{ rabbitmq_admin_username }}"
    password: "{{ rabbitmq_admin_password }}"
    vhost: "{{ item }}"
    configure_priv: .*
    read_priv: .*
    write_priv: .*
    state: present
    tags: administrator
  tags: config
  with_items: "{{ rabbitmq_vhosts }}"

# Read-only account on the root vhost with the 'monitoring' RabbitMQ
# user tag, used by the checkmk probe.
- name: Create checkmk user
  community.rabbitmq.rabbitmq_user:
    user: "{{ rabbitmq_checkmk_username }}"
    password: "{{ rabbitmq_checkmk_password }}"
    vhost: /
    configure_priv: ""
    read_priv: .*
    write_priv: ""
    state: present
    tags: monitoring
  tags: config

- name: Create exchanges
  community.rabbitmq.rabbitmq_exchange:
    name: "{{ item.name }}"
    type: "{{ item.type }}"
    vhost: "{{ item.vhost }}"
    state: present
    login_user: "{{ rabbitmq_admin_username }}"
    login_password: "{{ rabbitmq_admin_password }}"
  with_items: "{{ rabbitmq_exchanges }}"
  tags: config,exchanges

# NOTE(review): '| default()' with no argument renders as an empty string,
# not as an omitted parameter — confirm an empty dead-letter exchange
# (the default exchange) is the intended fallback.
- name: Create queues
  community.rabbitmq.rabbitmq_queue:
    login_user: "{{ rabbitmq_admin_username }}"
    login_password: "{{ rabbitmq_admin_password }}"
    state: "{{ item.state | default('present') }}"
    vhost: "{{ item.vhost }}"
    name: "{{ item.name }}"
    durable: "{{ item.durable | default(true) }}"
    dead_letter_exchange: "{{ item.dead_letter_exchange | default() }}"
    dead_letter_routing_key: "{{ item.dead_letter_routing_key | default() }}"
    arguments: "{{ item.arguments | default({}) }}"
  with_items: "{{ rabbitmq_queues }}"
  tags: config,queues

- name: Create bindings
  community.rabbitmq.rabbitmq_binding:
    login_user: "{{ rabbitmq_admin_username }}"
    login_password: "{{ rabbitmq_admin_password }}"
    state: "{{ item.state | default('present') }}"
    vhost: "{{ item.vhost }}"
    name: "{{ item.name }}"
    destination: "{{ item.destination }}"
    destination_type: "{{ item.destination_type }}"
    routing_key: "{{ item.routing_key }}"
    arguments: "{{ item.arguments | default({}) }}"
  with_items: "{{ rabbitmq_bindings }}"
  tags: config,bindings

# Application accounts default to full privileges on their vhost; the
# RabbitMQ user tags 'monitoring,management' give them management UI access.
- name: Create app users
  community.rabbitmq.rabbitmq_user:
    user: "{{ item.username }}"
    password: "{{ item.password }}"
    vhost: "{{ item.vhost }}"
    read_priv: "{{ item.read_priv | default('.*') }}"
    write_priv: "{{ item.write_priv | default('.*') }}"
    configure_priv: "{{ item.configure_priv | default('.*') }}"
    state: present
    tags: monitoring,management
  with_items: "{{ rabbitmq_app_users }}"
  tags: config,users

View File

@@ -0,0 +1,156 @@
---
- name: Install requirements
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- gnupg
- curl
- apt-transport-https
- debian-keyring
tags: install,conf
- name: Import cloudsmith key (Rabbitmq-server)
ansible.builtin.get_url:
url: "https://dl.cloudsmith.io/public/rabbitmq/rabbitmq-server/gpg.9F4587F226208342.key"
dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.asc"
mode: "0644"
force: true
tags: install
- name: Import cloudsmith key (Erlang)
ansible.builtin.get_url:
url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-erlang.E495BB49CC4BBE5B.key"
dest: "/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg"
mode: "0644"
force: true
tags: install
- name: Import cloudsmith key (Erlang) - 2
ansible.builtin.get_url:
url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-server.9F4587F226208342.key"
dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg"
mode: "0644"
force: true
tags: install
- name: Add rabbitmq repository
ansible.builtin.apt_repository:
filename: rabbitmq
repo: "{{ item }}"
with_items:
- 'deb [arch=amd64 signed-by=/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg] https://ppa1.rabbitmq.com/rabbitmq/rabbitmq-erlang/deb/debian bookworm main'
- 'deb-src [signed-by=/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg] https://ppa1.rabbitmq.com/rabbitmq/rabbitmq-erlang/deb/debian bookworm main'
- 'deb [arch=amd64 signed-by=/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg] https://ppa2.rabbitmq.com/rabbitmq/rabbitmq-erlang/deb/debian bookworm main'
- 'deb-src [signed-by=/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg] https://ppa2.rabbitmq.com/rabbitmq/rabbitmq-erlang/deb/debian bookworm main'
- 'deb [arch=amd64 signed-by=/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg] https://ppa1.rabbitmq.com/rabbitmq/rabbitmq-server/deb/debian bookworm main'
- 'deb-src [signed-by=/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg] https://ppa1.rabbitmq.com/rabbitmq/rabbitmq-server/deb/debian bookworm main'
- 'deb [arch=amd64 signed-by=/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg] https://ppa2.rabbitmq.com/rabbitmq/rabbitmq-server/deb/debian bookworm main'
- 'deb-src [signed-by=/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg] https://ppa2.rabbitmq.com/rabbitmq/rabbitmq-server/deb/debian bookworm main'
tags: install
- name: Install Erlang packages
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- erlang-base
- erlang-asn1
- erlang-crypto
- erlang-eldap
- erlang-ftp
- erlang-inets
- erlang-mnesia
- erlang-os-mon
- erlang-parsetools
- erlang-public-key
- erlang-runtime-tools
- erlang-snmp
- erlang-ssl
- erlang-syntax-tools
- erlang-tftp
- erlang-tools
- erlang-xmerl
tags: install
- name: Install rabbitmq-server
ansible.builtin.apt:
name: "rabbitmq-server"
state: present
tags: install
- name: Hold rabbitmq and Erlang packages
ansible.builtin.dpkg_selections:
name: "{{ item }}"
selection: hold
with_items:
- rabbitmq-server
- erlang-base
tags: install
- name: Create systemd rabbit override directory
ansible.builtin.file:
path: "/etc/systemd/system/rabbitmq-server.service.d"
state: directory
owner: root
group: root
mode: "0755"
tags: install
- name: Set LimitNOFILE
ansible.builtin.copy:
src: limits.conf
dest: "/etc/systemd/system/rabbitmq-server.service.d/limits.conf"
mode: "0644"
owner: root
group: root
tags: install
notify:
- Daemon_reload
- Restart Rabbitmq
- name: Set Erlang cookie
ansible.builtin.template:
src: erlang.cookie
dest: /var/lib/rabbitmq/.erlang.cookie
owner: rabbitmq
group: rabbitmq
mode: "0400"
tags: install
notify: Restart Rabbitmq
- name: Ensure rabbitmq-server service is enabled
ansible.builtin.systemd_service:
name: rabbitmq-server.service
enabled: true
tags: install
- name: Deploy rabbitmq config file
ansible.builtin.template:
src: rabbitmq.conf.j2
dest: /etc/rabbitmq/rabbitmq.conf
mode: "0644"
owner: rabbitmq
group: rabbitmq
notify: Restart Rabbitmq
tags: install
- name: Deploy rabbitmq env file
ansible.builtin.template:
src: rabbitmq-env.conf.j2
dest: /etc/rabbitmq/rabbitmq-env.conf
mode: "0644"
owner: rabbitmq
group: rabbitmq
notify: Restart Rabbitmq
tags: install
# Deploy the logrotate policy for RabbitMQ server logs.
# Fixed: ansible.builtin.copy has no 'user' parameter — the file owner
# is set with 'owner' (the original 'user: root' would make the task
# fail with "Unsupported parameters").
- name: Deploy logrotate conf
  ansible.builtin.copy:
    src: logrotate
    dest: /etc/logrotate.d/rabbitmq-server
    mode: "0644"
    owner: root
    group: root
  tags: install

View File

@@ -0,0 +1,9 @@
---
# Role entry point: run installation first, then configuration.
# Note: tags on include_tasks gate only the include statement itself;
# the included tasks carry their own matching tags, so running with
# e.g. '-t install' works at both levels.
- name: Import install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install
- name: Import config tasks
  ansible.builtin.include_tasks: config.yml
  tags: config,users,queues,vhosts,exchanges,bindings

View File

@@ -0,0 +1 @@
{{ lookup('community.hashi_vault.hashi_vault','ansible/data/rabbitmq/{{ env }}/erlang-cookie:value') }}

View File

@@ -0,0 +1,3 @@
#{{ ansible_managed }}
RABBITMQ_USE_LONGNAME=true

View File

@@ -0,0 +1,31 @@
#{{ ansible_managed }}
# Main RabbitMQ configuration (new-style rabbitmq.conf ini format).

# Limit to 70% of RAM
vm_memory_high_watermark.relative = 0.7
# Tuning raft WAL log file https://rabbitmq.com/quorum-queues.html#segment-entry-count
raft.segment_max_entries = 32768
raft.wal_max_size_bytes = 32000000
####################
#
# Cluster settings
#
####################
# Static (classic_config) peer discovery: the member list is rendered
# from rabbitmq_cluster_nodes.
cluster_formation.peer_discovery_backend = classic_config
cluster_name = {{ rabbitmq_cluster_name }}
# NOTE(review): node names render as rabbit1@.., rabbit2@.., while the
# Debian default node name is rabbit@<hostname> — confirm RABBITMQ_NODENAME
# is set accordingly on each member.
{% for item in rabbitmq_cluster_nodes %}
cluster_formation.classic_config.nodes.{{ loop.index }} = rabbit{{ loop.index }}@{{ item.hostname }}
{% endfor %}
####################
#
# Prometheus Plugins
#
####################
## Statistics collection interval (in milliseconds). Increasing
## this will reduce the load on management database.
##
collect_statistics_interval = {{ rabbitmq_collect_statistics_interval }}

View File

@@ -0,0 +1,3 @@
---
ssh_host_keys_keys: ''

View File

@@ -0,0 +1,22 @@
---
# Handlers for the ssh_host_keys role.
# The removal handler chains into the two handlers above it: handlers
# may notify other handlers, and they run in definition order.

# dpkg-reconfigure regenerates any missing standard host key pairs.
- name: Reconfigure sshd
  ansible.builtin.command: dpkg-reconfigure openssh-server
  changed_when: true

- name: Restart sshd
  ansible.builtin.service:
    name: ssh
    state: restarted

# Remove the template's well-known host key files (public and private),
# then regenerate and restart via the chained notifications.
# Fixed: dropped the unused 'register: state_pub' (nothing read it).
- name: Removing ssh host public and private keys
  ansible.builtin.file:
    path: "{{ item.1 }}"
    state: absent
  with_subelements:
    - "{{ ssh_host_keys_keys }}"
    - files
  notify:
    - Reconfigure sshd
    - Restart sshd

View File

@@ -0,0 +1,20 @@
---
- name: Set specific variables for distributions
ansible.builtin.include_vars: '{{ item }}'
with_first_found:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}.yml'
- '{{ ansible_os_family }}.yml'
- default.yml
- name: Checking if host keys have been changed
ansible.builtin.lineinfile:
line: "{{ item.pub_key }}"
dest: "{{ item.files.0 }}"
state: absent
check_mode: true
with_items: "{{ ssh_host_keys_keys }}"
notify: Removing ssh host public and private keys

View File

@@ -0,0 +1,18 @@
---
ssh_host_keys_keys:
- type: ssh_rsa
pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDK+gk7loCySamkbnp4MseVMlNpv03AuxKKW/HSGy5a1BigD7ZcFXmAybY4gbpEe8IHZkBee/5I9DIEh827H6hbL/rhvtx172Vp3tsuPUgDtFJZJqZH4mMKyO713K3qzldBN4zbMUytIy6acFzNiPksAtwib6CtBFcfTreeRrVhaut/B1T9MK38MARXa6g3oNtqSgHOwKGbYHWsQy1Ekav+KosALlyYSVs4d0ioWbO74/hir+Of/5bJpqzddhaYUCxsvmdmBGQtj6n4pQBPuWVWY8a7DN1Js2/TNfgyi++9WH16/sGxbOMPpQmMVvJ9Zg0DhOuOFERAZdTfoRjVi2QOEH1nMaQTocQnd9Oy53XoshAsNYn9TWywzGe6fjw8qzpp+a3ko6hkmYS2/mvIe8gL0suBy1bJpaze9JIX/RIxbQa7GpD0PApabgU4eO/gZZpNAeyFYsFaxNtyuFwUIsClyyPX8kccQ2ufLJkNLC7Ix1bdKGkd2vtUBs1Q31xELUM= root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_rsa_key.pub
- /etc/ssh/ssh_host_rsa_key
- type: ssh_ecdsa
pub_key: "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPciQgbaKkLubW/I3mCTsc7L1Dy+2zMvPw045332vVUmPeKm6t7xAu7nzUnjhUOxFK4qev1wgnOhPibwXQrxHQ4= root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_ecdsa_key.pub
- /etc/ssh/ssh_host_ecdsa_key
- type: ssh_ed25519
pub_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKIRtv0xKbDGS7G2rz9RQWnk/OSizkimy8WFBhokqxXR root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_ed25519_key.pub
- /etc/ssh/ssh_host_ed25519_key

View File

@@ -0,0 +1,13 @@
---
# yamllint configuration: default ruleset with relaxed brace/bracket
# spacing and no line-length limit. The document-start marker above is
# itself required by the 'default' ruleset this file extends.
extends: default
rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  line-length: disable
  # NOTE(retr0h): Templates no longer fail this lint rule.
  # Uncomment if running old Molecule templates.
  # truthy: disable

View File

@@ -0,0 +1,81 @@
# Unbound
This role installs and configures an Unbound resolver.
It also installs a prometheus exporter compiled from [letsencrypt/unbound_exporter](https://github.com/letsencrypt/unbound_exporter)
## Targets
- Debian
## Role variables
- ``unbound_interfaces``: list of interfaces Unbound has to listen on. If not specified, Unbound will listen on 0.0.0.0.
- ``unbound_authorized_cidrs``: list of authorized CIDRS to query the resolver. As Unbound rejects everything by default, if none is set, the resolver won't answer to anyone.
- ``unbound_threads``: number of threads Unbound runs on. (default: 1)
- ``unbound_cache_size``: size of Unbound cache, in Mb. (default: 100)
- ``unbound_zones``: dictionary describing the zones that need to be forwarded to another DNS server. It contains the following info for every managed zone:
``name``: name of the zone
``forward_ip``: list of the servers to forward queries to
``private``: boolean, has to be specified for dummy zones (ex: .priv). It disables DNSSEC validation for those zones.
Zones that are not explicitly specified in forwards will be forwarded to the root servers.
## Prometheus exporter
* For the exporter to work properly you need to run the following command on each resolver :
```
unbound-control-setup
```
* You also need to ensure that the "extended-statistics: yes" directive is in the conf (it is here).
* The exporter configuration can be changed by modifying the systemd service template.
## Unbound logging
In order to enable query log, you need to do the following :
* Add the following directives to the config :
```
logfile: "/var/log/unbound/unbound.log"
log-time-ascii: yes
log-queries: yes
log-replies: yes # will log information about the reply; slows response time.
```
* Add the following line in /etc/apparmor.d/usr.sbin.unbound (with the comma) :
```
/var/log/unbound/unbound.log rw,
```
* Run the following commands to create both directory and file for logging :
```
mkdir /var/log/unbound
touch /var/log/unbound/unbound.log
chown -R unbound:unbound /var/log/unbound
apparmor_parser -r /etc/apparmor.d/usr.sbin.unbound
```
* Restart unbound.
## Example
In this example, we specify to forward queries for domain aaa.com to xxx.xxx.xxx.xxx, bbb.com to yyy.yyy.yyy.yyy or xxx.xxx.xxx.xxx as a failover, and requests for a private zone to zzz.zzz.zzz.zzz :
```yml
unbound_interfaces:
- "aaa.aaa.aaa.aaa"
unbound_authorized_cidrs:
- "aaa.aaa.aaa.0/24"
- "bbb.bbb.bbb.bbb/32"
unbound_threads: 2
unbound_cache_size: 1536
unbound_zones:
- name: "aaa.com"
forward_ip:
- xxx.xxx.xxx.xxx
- name: "bbb.com"
forward_ip:
- yyy.yyy.yyy.yyy
- xxx.xxx.xxx.xxx
- name: "mysuperprivatezone.priv"
forward_ip:
- zzz.zzz.zzz.zzz
private: true
```

View File

@@ -0,0 +1,6 @@
---
unbound_interfaces:
- "0.0.0.0"
unbound_threads: 1
unbound_cache_size: 100
unbound_loglevel: 1

View File

@@ -0,0 +1,10 @@
/var/log/unbound/*.log {
weekly
missingok
rotate 52
compress
notifempty
postrotate
/usr/sbin/unbound-control log_reopen
endscript
}

Binary file not shown.

View File

@@ -0,0 +1,15 @@
---
# Handlers for the unbound_resolver role.

# Required after dropping/altering the unbound_exporter unit file.
- name: Daemon reload
  ansible.builtin.systemd_service:
    daemon_reload: true

- name: Restart unbound exporter
  ansible.builtin.systemd_service:
    name: unbound_exporter
    state: restarted

# Reload rather than restart to apply configuration changes.
- name: Reload Unbound
  ansible.builtin.systemd_service:
    name: unbound
    state: reloaded

View File

@@ -0,0 +1,76 @@
---
- name: Set specific variables for distributions
ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- files:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' # CentOS-6.5
- '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml' # RedHat-6.5
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' # CentOS-6
- '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml' # RedHat-6
- '{{ ansible_distribution }}.yml' # CentOS
- '{{ ansible_os_family }}.yml' # RedHat
- 'default.yml'
- name: Enhance socket buffer size in UDP
ansible.posix.sysctl:
name: "{{ item }}"
value: 4194304
reload: true
with_items:
- "net.core.rmem_max"
- "net.core.wmem_max"
- name: Install Unbound
ansible.builtin.apt:
name: "{{ unbound_package }}"
update_cache: true
state: present
when: ansible_os_family == "Debian"
- name: Setup service configuration
ansible.builtin.template:
src: unbound.conf.j2
dest: /etc/unbound/unbound.conf.d/custom.conf
owner: unbound
group: unbound
mode: "0755"
notify: Reload Unbound
- name: Set permission on conf directory
ansible.builtin.file:
path: /etc/unbound
owner: unbound
group: unbound
recurse: true
- name: Ensure service is enabled at boot and started
ansible.builtin.systemd_service:
name: "unbound"
enabled: true
state: started
- name: Deploy unbound exporter
ansible.builtin.copy:
src: unbound_exporter
dest: /usr/local/bin/unbound_exporter
mode: "0755"
- name: Deploy unbound exporter service
ansible.builtin.template:
src: unbound_exporter.service.j2
dest: /etc/systemd/system/unbound_exporter.service
owner: root
group: root
mode: "0644"
notify:
- Daemon reload
- Restart unbound exporter
- name: Deploy logrotate config file
ansible.builtin.copy:
src: logrotate
dest: /etc/logrotate.d/unbound
owner: root
group: root
mode: "0644"

View File

@@ -0,0 +1,58 @@
## {{ ansible_managed }}
# Unbound resolver configuration rendered by the unbound_resolver role.
server:
    verbosity: {{ unbound_loglevel }}
    # Required by the prometheus unbound_exporter.
    extended-statistics: yes
    do-udp: yes
    do-tcp: yes
    do-ip6: no
    # One cache slab per thread keeps lock contention low.
    num-threads: {{ unbound_threads }}
    msg-cache-slabs: {{ unbound_threads }}
    rrset-cache-slabs: {{ unbound_threads }}
    infra-cache-slabs: {{ unbound_threads }}
    key-cache-slabs: {{ unbound_threads }}
    rrset-cache-size: {{ unbound_cache_size }}m
    key-cache-size: {{ ((unbound_cache_size/2) | int) }}m
    msg-cache-size: {{ ((unbound_cache_size/2) | int) }}m
    neg-cache-size: {{ ((unbound_cache_size/4) | int) }}m
    prefetch: yes
    cache-min-ttl: 300
    cache-max-ttl: 86400
    outgoing-range: 8192
    num-queries-per-thread: 4096
    so-rcvbuf: 4m
    so-sndbuf: 4m
    so-reuseport: yes
    rrset-roundrobin: yes
    # Fixed: was 'val-log-level:1' (missing separator space).
    val-log-level: 1
{% for iface in unbound_interfaces %}
    interface: {{ iface }}
{% endfor %}
{# unbound_authorized_cidrs has no default in the role: without this
   guard the template task fails on hosts that leave it undefined.
   Leaving it unset means Unbound keeps its deny-by-default behaviour. #}
{% if unbound_authorized_cidrs is defined %}
{% for cidr in unbound_authorized_cidrs %}
    access-control: {{ cidr }} allow
{% endfor %}
{% endif %}
{% if unbound_zones is defined %}
{% for zone in unbound_zones %}
{% if zone.private is defined and zone.private %}
    # Disable DNSSEC validation for private zones.
    domain-insecure: "{{ zone.name }}"
{% endif %}
{% endfor %}
{% for zone in unbound_zones %}
forward-zone:
    name: "{{ zone.name }}"
{% for fwa in zone.forward_ip %}
    forward-addr: {{ fwa }}
{% endfor -%}
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,13 @@
[Unit]
Description=Unbound exporter for prometheus
Documentation=https://github.com/letsencrypt/unbound_exporter
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
ExecStart=/usr/local/bin/unbound_exporter -unbound.host="unix:///run/unbound.ctl"
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,3 @@
---
unbound_package: "unbound"

View File

@@ -0,0 +1,30 @@
# Installation et configuration de varnish
## Variables
* varnish_listen_host: Adresse IP sur laquelle varnish écoute. (Default: 0.0.0.0)
* varnish_listen_port: Port sur lequel varnish écoute. (Default : 80)
* varnish_maxmemory: Mémoire maximum occupée par varnish. (Default : 3G)
* varnish_acl_purge_hosts: Adresse IP autorisée à effectuer des requêtes PURGE. (Default 127.0.0.1)
* varnish_health_check: URL de healthcheck des applications qui ne seront pas cachées. (Default : /healthcheck$)
* varnish_backend_servers: Liste des serveurs de backends.
```
varnish_backend_servers:
docker-hpv008-stg:
host: "10.13.100.8"
port: "80"
docker-hpv009-stg:
host: "10.13.100.9"
port: "80"
```
## Fonctionnalités
* Désactive les services systemd fournis de base pour Varnish et Varnishncsa.
* Dépose et active des services custom pour Varnish et Varnishncsa qui permettent la personnalisation des paramètres de lancement.
* Gère la configuration VCL.
* Dépose les configurations logrotate et rsyslog.
## Modification de configuration
```
vim roles/varnish/templates/default.vcl.j2
ansible-playbook -i hosts-stg -l varnish_stg -t config playbooks/varnish.yml
```

View File

@@ -0,0 +1,15 @@
---
varnish_listen_host: '0.0.0.0'
varnish_listen_port: 6081
varnish_maxmemory: '3G'
varnish_acl_purge_hosts:
- 127.0.0.1
varnish_health_check: "/healthcheck$"
varnish_varnishncsa_custom_items:
- domain1
- domain2
- domain3

Some files were not shown because too many files have changed in this diff Show More