Compare commits

...

29 Commits

Author SHA1 Message Date
9bfc5a596b ansible : add alloy role 2026-02-24 10:35:08 +00:00
fc45817240 adding argoCD 2026-02-05 09:25:52 +01:00
cf5c5a076e ansible: add vim role 2025-09-15 13:59:36 +02:00
aed7efed71 Ajouter postgresql/urls.md 2025-08-03 21:50:31 +02:00
e4359ca7f9 feat(misc): git 2025-07-31 16:36:41 +02:00
ea32d22dd3 adding misc commands 2025-07-16 16:58:06 +02:00
3e8afbbfb6 kubernetes: update cli 2025-06-05 14:35:12 +02:00
eeeaa55aae kubernetes: update cli 2025-06-04 17:21:39 +02:00
ce39cc3736 add bash section 2025-06-04 09:40:29 +02:00
f6ac190c95 adding pulumi section 2025-06-04 09:34:44 +02:00
7473da14f4 feat(terraform): adding cli file 2025-06-02 14:41:13 +02:00
e5738c5c3f fluxcd : adding repo example with some apps 2025-05-28 16:00:46 +02:00
00a5e56c27 kubernetes : add kubectl krew 2025-05-28 14:14:07 +02:00
5620165b33 terraform: add applications module 2025-05-28 12:02:11 +02:00
7bf1db48f7 terraform: del applications module 2025-05-28 12:01:41 +02:00
dc24dae779 terraform: add buckets module 2025-05-28 12:01:19 +02:00
62148d2af5 terraform: add applications module 2025-05-28 12:00:51 +02:00
d338ccc850 fix unicode 2025-05-28 11:54:17 +02:00
0d5f9eff7e add gitlab section 2025-05-28 11:53:16 +02:00
e96e220869 add postgresql role 2025-05-28 11:31:55 +02:00
24465cb6f9 add mongodb role 2025-05-28 11:23:06 +02:00
dd1900fffe add unbound_resolver role 2025-05-28 11:15:47 +02:00
6679277418 ssh_host_keys 2025-05-28 11:09:56 +02:00
f0a78236c7 linting vault role 2025-05-27 10:04:05 +02:00
50edb313f7 linting varnish role 2025-05-27 10:03:02 +02:00
0b37136364 linting rabbitmq ansible role 2025-05-27 09:58:15 +02:00
44e2674cc4 linting role opcache 2025-05-27 09:51:30 +02:00
0bb618a4a3 linting ansible neo4j 2025-05-27 09:49:05 +02:00
1ff53c6424 linting ansible/roles/filebrowser 2025-05-27 09:46:59 +02:00
212 changed files with 22533 additions and 121 deletions

View File

@@ -0,0 +1,14 @@
---
# Defaults for the Grafana Alloy role (override in group_vars/host_vars).
# Release of Grafana Alloy fetched from the GitHub releases page.
alloy_version: "1.13.2"
# Log verbosity used in the rendered config's logging block.
alloy_log_level: "info"
# Service account the daemon runs as (created by the role's tasks).
alloy_username: "alloy"
alloy_groupname: "alloy"
# Fixed uid/gid for the service account; quoted so YAML keeps them as strings.
alloy_uid: "1001"
alloy_gid: "1001"
# Location of the rendered Alloy configuration.
alloy_config_directory: "/etc/alloy"
alloy_config_file: "{{ alloy_config_directory }}/config.alloy"
# Runtime state directory (used as --storage.path and WorkingDirectory).
alloy_working_directory: "/var/lib/alloy"
# Install location of the alloy binary.
alloy_binary_dir: "/usr/local/bin"
# Extra arguments appended to `alloy run` via /etc/default/alloy.
alloy_run_args: "--stability.level=experimental --server.http.listen-addr='0.0.0.0:12345'"
# Rendered into RESTART_ON_UPGRADE in /etc/default/alloy.
alloy_restart_on_upgrade: true

View File

@@ -0,0 +1,10 @@
---
# Handlers for the alloy role.
- name: Restart alloy
  # Restart the daemon after a configuration change (notified by tasks).
  ansible.builtin.systemd_service:
    name: "alloy.service"
    state: restarted
- name: Systemd reload
  # Re-read unit definitions after the unit file is (re)deployed.
  ansible.builtin.systemd_service:
    daemon_reload: true

View File

@@ -0,0 +1,85 @@
---
# Install and configure Grafana Alloy: binary, service account, configuration,
# environment file and systemd unit.
- name: Download and install Alloy binary
  ansible.builtin.unarchive:
    src: "https://github.com/grafana/alloy/releases/download/v{{ alloy_version }}/alloy-linux-amd64.zip"
    dest: "{{ alloy_binary_dir }}"
    remote_src: true
  tags: install
- name: Rename binary
  # The archive extracts as "alloy-linux-amd64"; install it under the canonical name.
  ansible.builtin.copy:
    src: "{{ alloy_binary_dir }}/alloy-linux-amd64"
    dest: "{{ alloy_binary_dir }}/alloy"
    remote_src: true
    # Quoted so the octal mode is passed as a string, not the integer 755.
    mode: "0755"
  tags: install
- name: Create Alloy group
  ansible.builtin.group:
    name: "{{ alloy_groupname }}"
    gid: "{{ alloy_gid }}"
  tags: install
- name: Create Alloy user
  ansible.builtin.user:
    name: "{{ alloy_username }}"
    uid: "{{ alloy_uid }}"
    group: "{{ alloy_groupname }}"
    # No interactive login for the service account.
    shell: "/bin/false"
  tags: install
- name: Ensure config directory exists
  ansible.builtin.file:
    path: "{{ alloy_config_directory }}"
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    state: directory
    mode: "0755"
  tags: install,config
- name: Ensure working directory exists
  # Referenced by the unit as WorkingDirectory and --storage.path.
  ansible.builtin.file:
    path: "{{ alloy_working_directory }}"
    state: directory
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    mode: "0755"
  tags: install
- name: Deploy Alloy config
  ansible.builtin.template:
    src: "config.j2"
    # Reuse the role variable instead of re-deriving the same path.
    dest: "{{ alloy_config_file }}"
    owner: "{{ alloy_username }}"
    group: "{{ alloy_groupname }}"
    mode: "0644"
  notify: Restart alloy
  tags: install,config
- name: Deploy Alloy default file
  ansible.builtin.template:
    src: "default.j2"
    dest: "/etc/default/alloy"
    owner: "root"
    group: "root"
    mode: "0644"
  notify: Restart alloy
  tags: install,config
- name: Deploy Alloy systemd config
  ansible.builtin.template:
    src: "systemd_service.j2"
    dest: "/etc/systemd/system/alloy.service"
    owner: "root"
    group: "root"
    mode: "0644"
  # Unit file changes require systemd to re-read its unit definitions.
  notify: Systemd reload
  tags: install
- name: Start and enable Alloy service
  ansible.builtin.systemd_service:
    name: "alloy.service"
    state: started
    enabled: true
    daemon_reload: true
  tags: install

View File

@@ -0,0 +1,34 @@
// Tail *.log files from /in, parse "<timestamp> <msg>" lines and forward the
// records to the syslog exporter below.
otelcol.receiver.filelog "file" {
  include = ["/in/*.log"]
  operators = [{
    type = "regex_parser",
    // NOTE(review): the bare "." before the trailing "\d{2}:\d{2}" is what
    // matches the +/- sign of an offset such as "+02:00" — confirm the input
    // lines really end the timestamp with a numeric UTC offset.
    regex = `^(?P<timestamp>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{2}:\d{2}) (?P<msg>.*)$`,
    timestamp = {
      parse_from = "attributes.timestamp",
      // NOTE(review): "%j" is day-of-year in strptime-style layouts; for a
      // "+02:00" offset "%z" would normally be expected — TODO confirm.
      layout = "%Y-%m-%dT%H:%M:%S%j",
    },
    message = {
      parse_from = "attributes.msg",
    },
  },{
    // Drop the raw timestamp attribute once promoted to the record timestamp.
    type = "remove",
    field = "attributes.timestamp",
  }]
  output {
    logs = [otelcol.exporter.syslog.outsyslog.input]
  }
}
// Declared but not referenced by the receiver's output above.
otelcol.exporter.file "outfile" {
  path = "/out/output.log"
}
otelcol.exporter.syslog "outsyslog" {
  // NOTE(review): hard-coded destination host; consider promoting to a role variable.
  endpoint = "slaithier-promax14"
}
// Declared but not referenced; handy for troubleshooting pipelines.
otelcol.exporter.debug "default" {}
logging {
  level = "{{ alloy_log_level }}"
  format = "logfmt"
}

View File

@@ -0,0 +1,8 @@
# Environment file consumed by the alloy systemd unit (EnvironmentFile).
# The configuration file holding the Grafana Alloy configuration.
CONFIG_FILE="{{ alloy_config_file }}"
# User-defined arguments to pass to the run command.
CUSTOM_ARGS="{{ alloy_run_args }}"
# Restart on system upgrade. Defaults to true.
# "| lower" renders the YAML boolean as "true"/"false" instead of Jinja's
# Python-style "True"/"False", which shell consumers would not match.
RESTART_ON_UPGRADE={{ alloy_restart_on_upgrade | lower }}

View File

@@ -0,0 +1,19 @@
[Unit]
Description=Vendor-neutral programmable observability pipelines.
Documentation=https://grafana.com/docs/alloy/
Wants=network-online.target
After=network-online.target

[Service]
Restart=always
User={{ alloy_username }}
Group={{ alloy_groupname }}
# %H expands to the machine's hostname.
Environment=HOSTNAME=%H
# Provides CONFIG_FILE and CUSTOM_ARGS (rendered from default.j2).
EnvironmentFile=/etc/default/alloy
WorkingDirectory={{ alloy_working_directory }}
ExecStart={{ alloy_binary_dir }}/alloy run $CUSTOM_ARGS --storage.path={{ alloy_working_directory }} $CONFIG_FILE
# SIGHUP triggers a configuration reload without a full restart.
ExecReload=/usr/bin/env kill -HUP $MAINPID
TimeoutStopSec=20s

[Install]
WantedBy=multi-user.target

View File

@@ -1,10 +1,10 @@
--- ---
fb_version: "2.32.0" filebrowser_version: "2.32.0"
fb_user: "admin" filebrowser_user: "admin"
fb_group: "admin" filebrowser_group: "admin"
fb_port: "8080" filebrowser_port: "8080"
fb_address: "0.0.0.0" filebrowser_address: "0.0.0.0"
fb_log_dir: "/var/log/filebrowser" filebrowser_log_dir: "/var/log/filebrowser"
fb_config_dir: "/etc/filebrowser" filebrowser_config_dir: "/etc/filebrowser"
fb_root: "/nas/{{ env_long }}" filebrowser_root: "/nas/{{ env_long }}"

View File

@@ -4,10 +4,11 @@
ansible.builtin.file: ansible.builtin.file:
path: "/tmp/filebrowser" path: "/tmp/filebrowser"
state: directory state: directory
mode: "0755"
- name: Download Filebrowser binary from github - name: Download Filebrowser binary from github
ansible.builtin.unarchive: ansible.builtin.unarchive:
src: "https://github.com/filebrowser/filebrowser/releases/download/v{{ fb_version }}/linux-amd64-filebrowser.tar.gz" src: "https://github.com/filebrowser/filebrowser/releases/download/v{{ filebrowser_version }}/linux-amd64-filebrowser.tar.gz"
dest: "/tmp/filebrowser" dest: "/tmp/filebrowser"
remote_src: true remote_src: true
@@ -17,24 +18,24 @@
dest: /usr/local/bin/filebrowser dest: /usr/local/bin/filebrowser
owner: root owner: root
group: root group: root
mode: 0755 mode: "0755"
remote_src: true remote_src: true
- name: Create configuration folder - name: Create configuration folder
ansible.builtin.file: ansible.builtin.file:
path: "{{ fb_config_dir }}" path: "{{ filebrowser_config_dir }}"
owner: "{{ fb_user }}" owner: "{{ filebrowser_user }}"
group: "{{ fb_group }}" group: "{{ filebrowser_group }}"
mode: 0755 mode: "0755"
state: directory state: directory
- name: Deploying configuration file - name: Deploying configuration file
ansible.builtin.template: ansible.builtin.template:
src: "settings.json.j2" src: "settings.json.j2"
dest: "{{ fb_config_dir }}/settings.json" dest: "{{ filebrowser_config_dir }}/settings.json"
owner: "{{ fb_user }}" owner: "{{ filebrowser_user }}"
group: "{{ fb_group }}" group: "{{ filebrowser_group }}"
mode: 0644 mode: "0644"
notify: Restart notify: Restart
- name: Deploying service file - name: Deploying service file
@@ -43,12 +44,12 @@
dest: "/lib/systemd/system/filebrowser.service" dest: "/lib/systemd/system/filebrowser.service"
owner: "root" owner: "root"
group: "root" group: "root"
mode: 0644 mode: "0644"
notify: notify:
- Daemon reload - Daemon reload
- name: Running handlers for daemon reload - name: Running handlers for daemon reload
meta: flush_handlers ansible.builtin.meta: flush_handlers
- name: Enabling filebrowser service - name: Enabling filebrowser service
ansible.builtin.systemd: ansible.builtin.systemd:
@@ -58,10 +59,10 @@
- name: Create log folder - name: Create log folder
ansible.builtin.file: ansible.builtin.file:
path: "{{ fb_log_dir }}" path: "{{ filebrowser_log_dir }}"
owner: root owner: root
group: adm group: adm
mode: 0755 mode: "0755"
state: directory state: directory
- name: Deploy rsyslog config - name: Deploy rsyslog config
@@ -70,7 +71,7 @@
dest: /etc/rsyslog.d/filebrowser.conf dest: /etc/rsyslog.d/filebrowser.conf
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
- name: Deploy logrotate config - name: Deploy logrotate config
ansible.builtin.copy: ansible.builtin.copy:
@@ -78,12 +79,12 @@
dest: /etc/logrotate.d/filebrowser dest: /etc/logrotate.d/filebrowser
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
- name: Create custom branding folder - name: Create custom branding folder
ansible.builtin.file: ansible.builtin.file:
path: "/etc/filebrowser/branding/img/icons" path: "/etc/filebrowser/branding/img/icons"
owner: "{{ fb_user }}" owner: "{{ filebrowser_user }}"
group: "{{ fb_group }}" group: "{{ filebrowser_group }}"
mode: 0755 mode: "0755"
state: directory state: directory

View File

@@ -0,0 +1,27 @@
# Installation et configuration de mongoDB
## Configuration
La configuration se fait via le fichier ansible/group_vars/{{ nom_du_groupe }}.
## Variables
* mongodb_replicaset_name : Nom du replicaset configuré entre les serveurs. (Exemple : mongodb-stg)
## Fonctionnalités
* Installe les dépendances du rôle et de mongodb, le dépôt MongoDB 7, les paquets mongodb.
* Déploie les outils de backups.
* Déploie la configuration relative à la supervision (check, fichier d'authentification et rôle custom).
## Tags
* install : installe mongodb, la supervision, les backups et les utilisateurs.
* supervision : met à jour les éléments relatifs à la supervision (check, configuration, rôle custom).
* backup: déploie les outils nécessaires aux sauvegardes (scripts, role, utilisateur, cron).
## Modification de configuration
* Mise à jour des éléments de supervision :
```
ansible-playbook -i hosts-stg playbooks/mongodb.yml -t supervision -l mongodb_stg
```

View File

@@ -0,0 +1,5 @@
---
# Defaults for the mongodb role. These are intentionally empty and must be
# overridden in inventory/group_vars for the role to work.
# NFS servers hosting the backup share, one per environment (stg / prd).
mongodb_nfs_server_stg: ""
mongodb_nfs_server_prd: ""
# Checkmk server host used to download the mk_mongodb agent plugin.
mongodb_cmk_url: ""

View File

@@ -0,0 +1,9 @@
[Unit]
# Oneshot unit shipped by the mongodb role to turn off Transparent Huge Pages
# at boot, before services start.
Description=Disable Transparent Huge Pages (THP)
DefaultDependencies=no
After=sysinit.target local-fs.target

[Service]
Type=oneshot
# tee writes the sysfs file from within the sh -c child; stdout is discarded.
ExecStart=/bin/sh -c 'echo never | tee /sys/kernel/mm/transparent_hugepage/enabled > /dev/null'

[Install]
WantedBy=basic.target

View File

@@ -0,0 +1,2 @@
# Kernel tuning deployed by the mongodb role to /etc/sysctl.d/local.conf.
# Prefer reclaiming page cache over swapping out process memory.
vm.swappiness=1
# Upper bound on memory-mapped areas per process.
vm.max_map_count=128000

View File

@@ -0,0 +1,13 @@
# Logrotate policy for the MongoDB server log.
/var/log/mongodb/mongod.log {
    daily
    # Keep 7 rotated files.
    rotate 7
    missingok
    compress
    # Delay compressing the most recent rotation by one cycle.
    delaycompress
    notifempty
    create 640 mongodb mongodb
    sharedscripts
    postrotate
        # SIGUSR1 asks mongod to reopen its log file (systemLog.logRotate: reopen).
        /bin/kill -SIGUSR1 `cat /var/run/mongodb/mongod.pid 2>/dev/null` >/dev/null 2>&1
    endscript
}

View File

@@ -0,0 +1,15 @@
---
# Handlers for the mongodb role.
- name: Systemd daemon_reload
  # Re-read unit definitions after a unit file is deployed.
  ansible.builtin.systemd_service:
    daemon_reload: true
- name: Restart mongodb
  ansible.builtin.systemd_service:
    name: mongod
    state: restarted
- name: Restart pbm-agent
  # pbm-agent — presumably Percona Backup for MongoDB; not installed by the
  # tasks visible here.
  ansible.builtin.systemd_service:
    name: pbm-agent
    state: restarted

View File

@@ -0,0 +1,124 @@
---
- name: Install dependencies
  # Pass the list directly to apt: one transaction instead of one per item
  # (with_items + "{{ item }}" is the deprecated form).
  ansible.builtin.apt:
    name:
      - gnupg2
      - lsb-release
      - nfs-common
    state: present
  tags: install,backup
- name: Ensure nas directory exists
ansible.builtin.file:
path: /nas
state: directory
owner: root
group: root
mode: "0755"
tags: install,backup
- name: Create backup custom role
community.mongodb.mongodb_role:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
replica_set: "{{ mongodb_replicaset_name }}"
database: "admin"
name: "pbmAnyAction"
privileges:
- resource:
db: ""
collection: ""
actions:
- "anyAction"
roles:
- role: "backup"
db: "admin"
- role: "clusterMonitor"
db: "admin"
- role: "restore"
db: "admin"
- role: "readWrite"
db: "admin"
state: present
tags: install,backup
- name: Create backup user
  # Account used by the dump tooling (granted the custom pbmAnyAction role).
  community.mongodb.mongodb_user:
    login_user: "admin"
    # Vault paths are built with '~' concatenation: moustaches nested inside a
    # Jinja expression are not re-templated, so '{{ env }}' would stay literal.
    login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/admin:password') }}"
    replica_set: "{{ mongodb_replicaset_name }}"
    database: "admin"
    name: "backup"
    password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/backup:password') }}"
    roles: "pbmAnyAction"
    auth_mechanism: "SCRAM-SHA-256"
    state: "present"
    # Never rotate the password on subsequent runs.
    update_password: on_create
  tags: install,backup
- name: Add nas mounting to fstab
  ansible.posix.mount:
    src: "{{ mongodb_nfs_server_stg }}:/data/shares/mongodb"
    path: "/nas"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    # state: present only edits fstab; it does not mount immediately.
    state: present
  # Jinja equality is "==": a single "=" is a syntax error when the
  # conditional is evaluated.
  when: dbenv == "stg"
  tags: install,backup,nfs
- name: Add nas mounting to fstab
  ansible.posix.mount:
    src: "{{ mongodb_nfs_server_prd }}:/data/shares/mongodb"
    path: "/nas"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    # state: present only edits fstab; it does not mount immediately.
    state: present
  # Jinja equality is "==": a single "=" is a syntax error when the
  # conditional is evaluated.
  when: dbenv == "prd"
  tags: install,backup,nfs
- name: Ensure scripts directory exists
ansible.builtin.file:
path: /data/scripts
state: directory
owner: root
group: root
mode: "0755"
tags: install,backup
- name: Deploy backup script
ansible.builtin.template:
src: mongodb-dump-full.sh.j2
dest: /data/scripts/mongodb-dump-full.sh
owner: root
group: root
mode: "0750"
tags: install,backup
- name: Add cron to trigger backup
ansible.builtin.cron:
name: "mongodb-dump-full"
weekday: "*"
minute: "0"
hour: "02"
user: root
job: "/data/scripts/mongodb-dump-full.sh -r 14 -d /nas -c"
cron_file: mongodb-dump-full
disabled: true
tags: install,backup
- name: Add MAILTO variable to cronfile
community.general.cronvar:
name: MAILTO
value: "''"
cron_file: mongodb-dump-full
state: present
tags: install,backup
- name: Add check batch conf to checkmk
ansible.builtin.lineinfile:
path: /etc/check_mk/mrpe.cfg
line: "#script_mongodb-dump-databases.sh /usr/local/nagios/plugins/check_batch mongodb-dump-full.sh 129600"
state: present
tags: install,backup

View File

@@ -0,0 +1,128 @@
---
- name: Install requirements
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- gnupg
- python3-pip
tags: install,conf,users
- name: Installing pymongo via pip
ansible.builtin.pip:
name:
- pymongo
tags: install,conf,users
- name: Deploy service to disable THP at boot
  ansible.builtin.copy:
    src: disable-thp.service
    dest: /etc/systemd/system/disable-thp.service
    owner: root
    group: root
    # Unit files are data, not executables: 0644 avoids systemd's
    # "configuration file is marked executable" warning.
    mode: "0644"
  notify: Systemd daemon_reload
  tags: install
- name: Enable disable-thp service
ansible.builtin.systemd:
name: disable-thp
enabled: true
masked: false
tags: install
- name: Deploy sysctl conf (max_map_count, swappiness)
ansible.builtin.copy:
src: local.conf
dest: /etc/sysctl.d/local.conf
owner: root
group: root
mode: "0644"
tags: install,conf
- name: Get mongodb.com gpg key
ansible.builtin.get_url:
url: https://pgp.mongodb.com/server-7.0.asc
dest: /usr/share/keyrings/mongodb-server-7.0.asc
owner: root
group: root
mode: "0644"
tags: install
- name: Add mongodb.com repository
ansible.builtin.apt_repository:
repo: "deb [ signed-by=/usr/share/keyrings/mongodb-server-7.0.asc] http://repo.mongodb.org/apt/debian bullseye/mongodb-org/7.0 main"
state: present
tags: install
- name: Install mongodb
ansible.builtin.apt:
name: mongodb-org
state: present
tags: install
- name: Holding mongodb packages
  # Pin the packages so unattended upgrades cannot move the cluster version.
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: hold
  # loop is the modern replacement for with_items.
  loop:
    - mongodb-org
    - mongodb-org-database
    - mongodb-org-server
    - mongodb-mongosh
    - mongodb-org-mongos
    - mongodb-org-tools
  tags: install
- name: Ensure permissions are correct on /var/lib/mongodb
ansible.builtin.file:
path: /var/lib/mongodb
owner: mongodb
group: mongodb
mode: "0755"
tags: install
- name: Start and enable mongodb service
ansible.builtin.systemd:
name: mongod
state: started
enabled: true
tags: install
- name: Deploy conf file
ansible.builtin.template:
src: mongod.conf.j2
dest: /etc/mongod.conf
owner: root
group: root
mode: "0644"
tags: install,conf
notify: Restart mongodb
- name: Deploy keyFile for auth in cluster
ansible.builtin.template:
src: mongo-keyfile.j2
dest: /etc/mongo-keyfile
owner: mongodb
group: mongodb
mode: "0400"
tags: install
- name: Deploy logrotate conf file
ansible.builtin.copy:
src: logrotate.conf
dest: /etc/logrotate.d/mongodb
owner: root
group: root
mode: "0644"
tags: install
- name: Create replicaset
community.mongodb.mongodb_replicaset:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
login_host: localhost
replica_set: "{{ mongodb_replicaset_name }}"
members: "{{ mongodb_replicaset_members }}"
tags: install

View File

@@ -0,0 +1,13 @@
---
# Entry point for the mongodb role: installation, then supervision, then backups.
- name: Include install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install
- name: Include supervision tasks
  ansible.builtin.include_tasks: supervision.yml
  tags: install,supervision
- name: Include backup tasks
  ansible.builtin.include_tasks: backup.yml
  tags: install,backup

View File

@@ -0,0 +1,114 @@
---
- name: Deploy checkmk conf template
ansible.builtin.template:
src: mk_mongodb.cfg.j2
dest: /etc/check_mk/mk_mongodb.cfg
owner: root
group: root
mode: "0644"
tags: install
- name: Deploy checkmk mongo check
  ansible.builtin.get_url:
    # Use the role default "mongodb_cmk_url": the previously referenced
    # "mongodb_checkmk_url" is not defined in this role's defaults and would
    # fail templating unless supplied elsewhere.
    url: "https://{{ mongodb_cmk_url }}/check_mk/agents/plugins/mk_mongodb.py"
    # Trailing slash: treat dest as the plugin directory.
    dest: /usr/lib/check_mk_agent/plugins/
    owner: root
    group: root
    mode: "0755"
  tags: install
- name: Deploy supervision role
community.mongodb.mongodb_role:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
replica_set: "{{ mongodb_replicaset_name }}"
name: supervision
database: admin
privileges:
- resource:
db: ""
collection: "system.version"
actions:
- "collStats"
- resource:
db: ""
collection: "system.keys"
actions:
- "collStats"
- resource:
db: ""
collection: "system.roles"
actions:
- "collStats"
- resource:
db: ""
collection: "system.users"
actions:
- "collStats"
- resource:
db: ""
collection: "system.preimages"
actions:
- "collStats"
- resource:
db: ""
collection: "system.indexBuilds"
actions:
- "collStats"
- resource:
db: ""
collection: "system.rollback.id"
actions:
- "collStats"
- resource:
db: ""
collection: "system.views"
actions:
- "collStats"
- resource:
db: ""
collection: "system.replset"
actions:
- "collStats"
- resource:
db: ""
collection: "replset.initialSyncId"
actions:
- "collStats"
- resource:
db: ""
collection: "replset.election"
actions:
- "collStats"
- resource:
db: ""
collection: "replset.oplogTruncateAfterPoint"
actions:
- "collStats"
- resource:
db: ""
collection: "replset.minvalid"
actions:
- "collStats"
roles:
- role: "clusterMonitor"
db: "admin"
- role: "readAnyDatabase"
db: "admin"
state: present
tags: install,supervision
- name: Create checkmk mongodb user
community.mongodb.mongodb_user:
login_user: "admin"
login_password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/admin:password') }}"
database: "admin"
name: "checkmk"
password: "{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/{{ env }}/users/checkmk:password') }}"
roles: "supervision"
auth_mechanism: "SCRAM-SHA-256"
replica_set: "{{ mongodb_replicaset_name }}"
state: "present"
update_password: on_create
tags: install,supervision

View File

@@ -0,0 +1,10 @@
# Checkmk mk_mongodb agent plugin configuration (rendered by Ansible).
[MONGODB]
# all keys are optional
host = 127.0.0.1
# host defaults to localhost
username = checkmk
# Vault path built with '~' concatenation: moustaches nested inside a Jinja
# expression are not re-templated, so '{{ env }}' would be passed literally.
password = {{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/checkmk:password') }}
auth_source = admin
# auth_source defaults to admin
auth_mechanism = DEFAULT

View File

@@ -0,0 +1 @@
{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/keyFile:key') }}

View File

@@ -0,0 +1,44 @@
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/

# how the process runs — single section: the file previously declared
# processManagement twice, and duplicate YAML keys are last-wins in most
# parsers, silently dropping pidFilePath (which logrotate relies on).
processManagement:
  pidFilePath: /var/run/mongodb/mongod.pid
  timeZoneInfo: /usr/share/zoneinfo

# Where and how to store data.
storage:
  dbPath: /var/lib/mongodb
#  engine:
#  wiredTiger:

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  # "reopen" pairs with the logrotate postrotate SIGUSR1.
  logRotate: reopen
  path: /var/log/mongodb/mongod.log

# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0

security:
  keyFile: /etc/mongo-keyfile
  authorization: enabled

#operationProfiling:

replication:
  replSetName: {{ mongodb_replicaset_name }}

#sharding:

setParameter:
  transactionLifetimeLimitSeconds: 3600

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Full mongodump of the local replica-set member, meant to run on a secondary.
# Writes an archive to <backupdir>/<date>-<host>.gz and reports the result to
# a checkmk "batch" status file under /var/tmp.
# Usage: mongodb-dump-full.sh -r <retention_days> -d <backup_dir> [-c]
set -euo pipefail

DATE=$(date +%Y%m%d)
HOSTNAME=$(hostname -s)
STATUS=0
LOGFILE="/var/tmp/mongodb-dump-databases.log"
HOST="localhost"
COMPRESS=false
USER="backup"
# Vault path built with '~': moustaches nested inside a Jinja expression are
# not re-templated.
PASSWORD="{{ lookup('community.hashi_vault.hashi_vault', 'ansible/data/mongodb/' ~ env ~ '/users/backup:password') }}"
DUMP_OPTIONS=""
touch ${LOGFILE}
#
# Fonctions
#
# Abort unless the NFS share is mounted (detected via a ".mount" marker file).
# NOTE(review): this function is never invoked; confirm the share provides the
# marker before wiring it into the main flow.
checkNas()
{
    if [ ! -e "${BACKUPDIR}/.mount" ]; then
        echo "${BACKUPDIR} not mounted. Backup aborted." | tee -a ${LOGFILE}
        exit 1
    fi
}
usage()
{
    echo "$0 -r <retention> -d <repertoire> -c (compression)"
    echo "Exemple : /data/scripts/mongodb-dump-full.sh -r 20 -d /nas -c"
}
#
# Main
#
while getopts "hcr:d:" option
do
    case "${option}"
    in
        r)
            RETENTION=${OPTARG};;
        d)
            BACKUPDIR=${OPTARG};;
        c)
            COMPRESS=true;;
        h | *)
            usage
            exit 1;;
    esac
done
# -r and -d are mandatory: under `set -u` an unset variable would otherwise
# abort later with an unhelpful "unbound variable" error.
if [ -z "${RETENTION:-}" ] || [ -z "${BACKUPDIR:-}" ]; then
    usage
    exit 1
fi
echo "Lancement du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}
# check if the node is secondary (only dump from a secondary)
SEC=$(mongosh --host=${HOST} --authenticationDatabase admin --username ${USER} --password ${PASSWORD} --eval 'rs.hello().secondary' --quiet)
if [ ! "${SEC}" == "true" ]; then
    echo "$(date +%s)|2|Node is not secondary ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
    exit 0
fi
[ -d "${BACKUPDIR}" ] || mkdir -p "${BACKUPDIR}"
DUMP_OPTIONS="--authenticationDatabase=admin --username=${USER} --password=${PASSWORD}"
# A bare [ "${COMPRESS}" ] is always true (the string "false" is non-empty);
# compare the value so -c actually controls --gzip.
if [ "${COMPRESS}" = "true" ]; then
    DUMP_OPTIONS="${DUMP_OPTIONS} --gzip"
fi
# dump — capture mongodump's exit code (not tee's) and keep going on failure
# so the checkmk status file is still written.
set +e
mongodump -v --host=${HOST} ${DUMP_OPTIONS} --archive="${BACKUPDIR}/${DATE}-${HOSTNAME}.gz" | tee -a ${LOGFILE}
STATUS=${PIPESTATUS[0]}
set -e
# output in statusfile for checkmk
echo "$(date +%s)|${STATUS}|Check log file ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
# NOTE(review): RETENTION is only logged — old archives are never purged here;
# confirm whether cleanup happens elsewhere before adding a find/-delete.
echo "Fin du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}

View File

@@ -1,6 +1,6 @@
--- ---
- name: restart neo4j - name: Restart neo4j
ansible.builtin.systemd_service: ansible.builtin.systemd_service:
name: neo4j.service name: neo4j.service
state: restarted state: restarted

View File

@@ -14,7 +14,7 @@
path: "/data/scripts" path: "/data/scripts"
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
state: directory state: directory
tags: backup tags: backup
@@ -24,7 +24,7 @@
dest: "/data/scripts/neo4j-dump-database.sh" dest: "/data/scripts/neo4j-dump-database.sh"
owner: root owner: root
group: root group: root
mode: 0750 mode: "0750"
tags: backup tags: backup
- name: Setting up backup cron - name: Setting up backup cron

View File

@@ -1,6 +1,6 @@
--- ---
- name: checking dependencies - name: Checking dependencies
ansible.builtin.apt: ansible.builtin.apt:
package: "{{ item }}" package: "{{ item }}"
state: present state: present
@@ -12,28 +12,28 @@
- nfs-common - nfs-common
tags: install tags: install
- name: adding neo4j pgp key - name: Adding neo4j pgp key
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://debian.neo4j.com/neotechnology.gpg.key" url: "https://debian.neo4j.com/neotechnology.gpg.key"
dest: "/usr/share/keyrings/neo4j.asc" dest: "/usr/share/keyrings/neo4j.asc"
mode: 0644 mode: "0644"
force: true force: true
tags: install tags: install
- name: adding repo - name: Adding repo
ansible.builtin.apt_repository: ansible.builtin.apt_repository:
repo: "deb [signed-by=/usr/share/keyrings/neo4j.asc] https://debian.neo4j.com stable latest" repo: "deb [signed-by=/usr/share/keyrings/neo4j.asc] https://debian.neo4j.com stable latest"
update_cache: true update_cache: true
state: present state: present
tags: install tags: install
- name: install package - name: Install package
ansible.builtin.apt: ansible.builtin.apt:
package: "neo4j" package: "neo4j"
state: present state: present
tags: install tags: install
- name: holding package - name: Holding package
ansible.builtin.dpkg_selections: ansible.builtin.dpkg_selections:
name: neo4j name: neo4j
selection: hold selection: hold
@@ -51,6 +51,6 @@
dest: "/etc/neo4j/neo4j.conf" dest: "/etc/neo4j/neo4j.conf"
owner: neo4j owner: neo4j
group: neo4j group: neo4j
mode: 0660 mode: "0660"
tags: install,config tags: install,config
notify: restart neo4j notify: Restart neo4j

View File

@@ -1,7 +1,9 @@
--- ---
- include_tasks: install.yml - name: Include install tasks
ansible.builtin.include_tasks: install.yml
tags: install,config tags: install,config
- include_tasks: backup.yml - name: Include backup tasks
ansible.builtin.include_tasks: backup.yml
tags: backup tags: backup

View File

@@ -1,14 +1,14 @@
--- ---
- name: copy opcache script to server - name: Copy opcache script to server
ansible.builtin.copy: ansible.builtin.copy:
src: opcache.php src: opcache.php
dest: /data/services/web/default/opcache.php dest: /data/services/web/default/opcache.php
owner: www-data owner: www-data
group: www-data group: www-data
mode: 0644 mode: "0644"
- name: remove opcache tuning in www.conf pool - name: Remove opcache tuning in www.conf pool
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: "/etc/php/{{ php_version }}/fpm/pool.d/www.conf" path: "/etc/php/{{ php_version }}/fpm/pool.d/www.conf"
state: absent state: absent
@@ -22,11 +22,12 @@
path: /data/services/web/default/check/.online path: /data/services/web/default/check/.online
state: absent state: absent
- name: sleep for 6 seconds and continue with play - name: Sleep for 6 seconds and continue with play
ansible.builtin.wait_for: timeout=6 ansible.builtin.wait_for:
timeout: 6
delegate_to: localhost delegate_to: localhost
- name: restart php-fpm and apache - name: Restart php-fpm and apache
ansible.builtin.systemd: ansible.builtin.systemd:
name: "{{ item }}" name: "{{ item }}"
state: restarted state: restarted
@@ -38,10 +39,11 @@
ansible.builtin.file: ansible.builtin.file:
path: /data/services/web/default/check/.online path: /data/services/web/default/check/.online
state: touch state: touch
mode: '0777' mode: "0777"
owner: "{{ user }}" owner: "{{ user }}"
group: "{{ user }}" group: "{{ user }}"
- name: sleep for 3 seconds and continue with play - name: Sleep for 3 seconds and continue with play
ansible.builtin.wait_for: timeout=3 ansible.builtin.wait_for:
timeout: 3
delegate_to: localhost delegate_to: localhost

View File

@@ -0,0 +1,49 @@
# Installation et configuration de Postgresql
[TOC]
## Documentation
### Postgresql :
* [Postgresql Official Documentation](https://www.postgresql.org/docs/) (EN)
* [Postgresql Documentation Officielle](https://docs.postgresql.fr/) (FR)
### Modules Ansible :
* [Ansible Galaxy : Postgresql](https://galaxy.ansible.com/ui/repo/published/community/postgresql/) (EN)
## Configuration
### Variables
* postgresql_monitoring_(user/password) : Identifiants liés à la supervision et à pmm.
* postgresql_admin_role_attr_flags : Liste des rôles qu'on souhaite attribuer aux administrateurs. Défaut : CREATEDB,CREATEROLE,NOSUPERUSER.
* postgresql_pmm_server : Adresse du serveur PMM.
* postgresql_pmm_server_(username/password) : Identifiants utilisés pour se connecter au serveur PMM.
* postgresql_pmm_client_(username/password) : Identifiants utilisés pour se connecter au serveur Postgresql local.
* postgresql_users_networks : Liste des réseaux qui seront ajoutés au fichier pg_hba.conf pour les utilisateurs.
* postgresql_databases : Liste des bases de données à créer.
```
postgresql_databases:
- name: "testsla"
(optional) owner: "testsla"
schemas:
- name: "testsla_schema"
owner: "testsla"
```
### Tags
* install : Installe Postgresql et ses dépendances.
* config : Gère les configurations, crée les utilisateurs et effectue les tâches liées au monitoring.
* backup : Installe les composants nécessaires aux sauvegardes.
* monitoring : Installe et configure pmm-client.
* databases : Crée les bases de données et leurs schémas.
## Fonctionnement du rôle
### Basique pour comprendre Postgresql
#### Rôles, utilisateurs et permissions
* Un utilisateur correspond à un rôle avec un droit de login.
* Tous les utilisateurs font partie du groupe "public".
#### Base de données, schémas et tables.
* Une base de données contient un ou plusieurs schémas qui contiennent une ou plusieurs tables.
* Les bases de données contiennent par défaut un schema "public" sur lequel le groupe "public" a les droits de lecture.

View File

@@ -0,0 +1,21 @@
---
# Defaults for the postgresql role. Secrets are resolved from Vault at runtime.
# Data directory of the PostgreSQL 17 cluster managed by this role.
postgresql_default_data_dir: "/var/lib/postgresql/17/main"
# PMM client/server credentials. Vault paths are built with '~' concatenation:
# moustaches nested inside a Jinja expression are not re-templated, so a
# literal '{{ env }}' would otherwise end up in the path.
postgresql_pmm_client_username: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/' ~ env ~ '/' ~ postgresql_monitoring_user ~ ':pmm_client_username') }}"
postgresql_pmm_client_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/' ~ env ~ '/' ~ postgresql_monitoring_user ~ ':pmm_client_password') }}"
postgresql_pmm_server: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server') }}"
postgresql_pmm_server_username: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server_username') }}"
postgresql_pmm_server_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/pmm/server:pmm_server_password') }}"
# Service accounts created on the server; passwords come from Vault.
postgresql_monitoring_user: "monitoring"
postgresql_monitoring_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/monitoring:password') }}"
postgresql_backup_user: "backup"
postgresql_backup_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/backup:password') }}"
postgresql_replication_user: "replica"
postgresql_replication_password: "{{ lookup('hashi_vault', 'ansible/data/postgresql/' ~ env ~ '/replica:password') }}"
# Role attributes granted to administrator accounts.
postgresql_admin_role_attr_flags: "CREATEDB,CREATEROLE,NOSUPERUSER"
# Checkmk server host; empty by default, override in inventory.
postgresql_cmk_url: ""

View File

@@ -0,0 +1,17 @@
[Unit]
Description=PostgreSQL database server
Documentation=man:postgres(1)
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
User=postgres
# NOTE(review): hard-coded version-16 binary/config paths, while the role's
# defaults point at a version-17 data directory — confirm which major version
# this template is meant to manage.
# NOTE(review): pg_ctl detaches after starting postgres, but Type=notify
# expects the main process to signal readiness itself — verify the unit
# actually reaches "active" (Debian packaging execs the postgres binary
# directly).
ExecStart=/usr/lib/postgresql/16/bin/pg_ctl start -D /etc/postgresql/16/main/
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
TimeoutSec=infinity

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
---
# Handlers for the postgresql role.
- name: Daemon_reload
  # Re-read unit definitions after a unit file is deployed.
  ansible.builtin.systemd_service:
    daemon_reload: true
- name: Restart Postgres
  ansible.builtin.systemd_service:
    name: postgresql.service
    state: restarted

View File

@@ -0,0 +1,90 @@
---
# Backup setup: pg_hba entry and DB user for backups, NFS mount for the dump
# destination, the dump script itself, and its (deployed-disabled) cron job.
- name: Install dependencies
  # List form installs all packages in a single apt transaction instead of
  # looping one task per package.
  ansible.builtin.apt:
    name:
      - gnupg2
      - lsb-release
      - nfs-common
    state: present
  tags: install,config,backup
- name: Setting up pg_hba conf for backup user
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    # Use the role variable so the hba entry always matches the user created
    # below (defaults to "backup").
    users: "{{ postgresql_backup_user }}"
    source: "127.0.0.1"
    databases: all
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  tags: install,config,backup
- name: Creating backup user
  community.postgresql.postgresql_user:
    name: "{{ postgresql_backup_user }}"
    password: "{{ postgresql_backup_password }}"
  become: true
  become_user: postgres
  tags: install,config,backup
- name: Ensure needed directory exists
  ansible.builtin.file:
    path: "{{ item }}"
    mode: "0755"
    owner: root
    group: root
    state: directory
  loop:
    - "/data/scripts/"
    - "/nas/"
  tags: install,config,backup
- name: Setting up mount point for nas
  ansible.posix.mount:
    path: "/nas"
    src: "{{ postgresql_nfs_server }}:/data/shares/postgresql"
    fstype: "nfs4"
    opts: "rw,noatime,nodiratime,_netdev"
    state: mounted
  tags: install,config,backup
- name: Deploying pgpass for backup user
  ansible.builtin.template:
    src: "pgpass-backup.j2"
    dest: "/root/.pgpass"
    owner: root
    group: root
    mode: "0600"
  tags: install,config,backup
- name: Deploying backup script
  ansible.builtin.template:
    src: "postgresql-dump-full.sh.j2"
    dest: "/data/scripts/postgresql-dump-full.sh"
    owner: root
    group: root
    mode: "0700"
  tags: install,config,backup
- name: Setting up cron for backup
  # NOTE(review): the cron entry is shipped with disabled: true — presumably
  # enabled per-host once the NAS mount is validated; confirm this is intended.
  ansible.builtin.cron:
    name: "postgresql backup"
    minute: "0"
    hour: "14"
    job: "/data/scripts/postgresql-dump-full.sh -r 10 -d /nas -c"
    user: root
    cron_file: postgresql-backup
    state: present
    disabled: true
  tags: install,config,backup
- name: Adding line to mrpe.cfg
  # The Checkmk MRPE entry is deployed commented out ("#postgresql_dump");
  # uncomment on the host to activate the batch check.
  ansible.builtin.lineinfile:
    path: "/etc/check_mk/mrpe.cfg"
    regexp: "^#postgresql_dump"
    line: "#postgresql_dump /usr/local/nagios/plugins/check_batch postgresql-dump-full.sh 129600"
  tags: install,config,backup

View File

@@ -0,0 +1,33 @@
---
# Database provisioning: create databases, their schemas, and grant USAGE on
# each schema to the public role.
- name: Create databases
  community.postgresql.postgresql_db:
    name: "{{ item.name }}"
    owner: "{{ item.owner | default('postgres') }}"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases }}"
  tags: databases
- name: Create schemas in databases
  community.postgresql.postgresql_schema:
    name: "{{ item.1.name }}"
    db: "{{ item.0.name }}"
    owner: "{{ item.1.owner | default('postgres') }}"
    # subelements yields (database, schema) pairs: item.0 is the database,
    # item.1 the schema. The previous "item.comment" never existed on the
    # pair, so the schema comment was silently always empty.
    comment: "{{ item.1.comment | default('') }}"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases | subelements('schemas') }}"
  tags: databases
- name: Grant usage on new schemas to public role
  community.postgresql.postgresql_privs:
    database: "{{ item.0.name }}"
    objs: "{{ item.1.name }}"
    type: "schema"
    privs: "USAGE"
    role: "public"
  become: true
  become_user: postgres
  loop: "{{ postgresql_databases | subelements('schemas') }}"
  tags: databases

View File

@@ -0,0 +1,125 @@
---
# Install PostgreSQL from the PGDG apt repository, pin the packages, deploy
# the systemd unit and server config, and seed pg_hba plus the replication user.
- name: Install requirements
  # List form installs all packages in a single apt transaction.
  ansible.builtin.apt:
    name:
      - gnupg
      - curl
      - apt-transport-https
      - debian-keyring
      - python3-psycopg2
    state: present
  tags: install,conf
- name: Import postgres key
  ansible.builtin.get_url:
    url: "https://www.postgresql.org/media/keys/ACCC4CF8.asc"
    dest: "/usr/share/keyrings/postgres.ACCC4CF8.asc"
    mode: "0644"
    force: true
  tags: install
- name: Add Postgres repository
  # Release name taken from the host facts instead of hard-coding "bookworm",
  # consistent with the ansible_distribution_release usage in monitoring.yml.
  ansible.builtin.apt_repository:
    filename: postgres
    repo: "deb [signed-by=/usr/share/keyrings/postgres.ACCC4CF8.asc] https://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main"
  tags: install,conf
- name: Install Postgresql
  ansible.builtin.apt:
    name:
      - postgresql
      - postgresql-client
      - libpq-dev
    state: present
  tags: install,conf
- name: Holding postgres packages
  # dpkg hold keeps apt from upgrading the server underneath us.
  ansible.builtin.dpkg_selections:
    name: "{{ item }}"
    selection: hold
  loop:
    - postgresql
    - postgresql-client
    - libpq-dev
    - python3-psycopg2
  tags: install,conf
- name: Deploy systemd service file
  ansible.builtin.copy:
    src: postgresql.service
    dest: "/lib/systemd/system/postgresql.service"
    mode: "0644"
    owner: root
    group: root
  tags: install
  notify:
    - Daemon_reload
    - Restart Postgres
- name: Deploy Postgresql config file
  ansible.builtin.copy:
    src: "postgresql.conf"
    dest: "/etc/postgresql/16/main/postgresql.conf"
    owner: postgres
    group: postgres
    mode: "0644"
  tags: install,conf
  notify: Restart Postgres
- name: Enable and start postgres service
  ansible.builtin.systemd_service:
    name: postgresql.service
    state: started
    enabled: true
- name: Setting up pg_hba conf for postgres
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: local
    databases: all
    users: postgres
    method: peer
    create: true
  become: true
  become_user: postgres
  tags: install
- name: Setting up pg_hba conf for replica
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    databases: replication
    source: "{{ item }}"
    users: replica
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  loop: "{{ postgresql_replication_networks }}"
  tags: install
- name: Creating replica users
  community.postgresql.postgresql_user:
    name: "{{ postgresql_replication_user }}"
    password: "{{ postgresql_replication_password }}"
    role_attr_flags: "REPLICATION"
  become: true
  become_user: postgres
  tags: install
- name: Setting up pg_hba conf for ILG/APP users
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    users: all
    source: "{{ item }}"
    databases: all
    method: scram-sha-256
    create: true
  loop: "{{ postgresql_users_networks }}"
  become: true
  become_user: postgres
  tags: install

View File

@@ -0,0 +1,17 @@
---
# Role entry point: install first, then backup, monitoring and database
# provisioning. Tags mirror the tags set inside each included file.
- name: Import install tasks
  ansible.builtin.include_tasks: install.yml
  tags: install
- name: Import backup related tasks
  ansible.builtin.include_tasks: backup.yml
  tags: config,backup
- name: Import monitoring tasks
  ansible.builtin.include_tasks: monitoring.yml
  tags: config,monitoring,pmm_register
- name: Import database related tasks
  ansible.builtin.include_tasks: databases.yml
  tags: databases

View File

@@ -0,0 +1,77 @@
---
# Monitoring setup: DB monitoring user, Checkmk agent plugin, and PMM client
# registration against the PMM server.
- name: Setting up pg_hba conf for monitoring users
  community.postgresql.postgresql_pg_hba:
    dest: "{{ postgresql_default_data_dir }}/pg_hba.conf"
    contype: host
    source: 127.0.0.1
    users: monitoring
    databases: all
    method: scram-sha-256
    create: true
  become: true
  become_user: postgres
  tags: config,monitoring
- name: Creating monitoring user
  community.postgresql.postgresql_user:
    name: "{{ postgresql_monitoring_user }}"
    password: "{{ postgresql_monitoring_password }}"
  become: true
  become_user: postgres
  tags: config,monitoring
- name: Granting privileges to monitoring user
  # Membership in pg_monitor gives read access to monitoring views without
  # superuser.
  community.postgresql.postgresql_privs:
    database: postgres
    type: group
    roles: "{{ postgresql_monitoring_user }}"
    objs: "pg_monitor"
    state: present
  become: true
  become_user: postgres
  tags: config,monitoring
- name: Deploying checkmk config file
  ansible.builtin.template:
    src: "postgres.cfg.j2"
    dest: "/etc/check_mk/postgres.cfg"
    owner: root
    group: root
    mode: "0644"
  tags: config,monitoring
- name: Deploying checkmk mk_postgres.py
  # Uses postgresql_cmk_url as declared in defaults/main.yml; the previous
  # "postgres_cmk_url" spelling referenced an undefined variable.
  ansible.builtin.get_url:
    url: "https://{{ postgresql_cmk_url }}/check_mk/agents/plugins/mk_postgres.py"
    dest: "/usr/lib/check_mk_agent/plugins/mk_postgres.py"
    owner: root
    group: root
    mode: "0755"
  tags: config,monitoring
- name: Installing percona tools repo
  ansible.builtin.apt:
    deb: https://repo.percona.com/apt/percona-release_latest.{{ ansible_distribution_release }}_all.deb
  tags: config,monitoring
- name: Installation pmm2-client
  ansible.builtin.apt:
    update_cache: true
    pkg: pmm2-client
    state: present
  tags: config,monitoring
- name: Register on pmm server
  # NOTE(review): changed_when marks the task changed only when pmm-admin
  # exits non-zero, which reads inverted — confirm the intended semantics.
  ansible.builtin.command:
    cmd: pmm-admin config --server-insecure-tls --server-url=https://{{ postgresql_pmm_server_username }}:{{ postgresql_pmm_server_password }}@{{ postgresql_pmm_server }}:443
  register: register_server
  changed_when: register_server.rc != 0
  tags: pmm_register
- name: Adding Postgresql to pmm
  ansible.builtin.command:
    cmd: pmm-admin add postgresql --username={{ postgresql_pmm_client_username }} --password={{ postgresql_pmm_client_password }}
  register: add_server
  changed_when: add_server.rc != 0
  tags: pmm_register

View File

@@ -0,0 +1 @@
localhost:5432:*:{{ postgresql_backup_user }}:{{ postgresql_backup_password }}

View File

@@ -0,0 +1 @@
INSTANCE=127.0.0.1:{{ postgresql_monitoring_user }}:{{ postgresql_monitoring_password }}

View File

@@ -0,0 +1,92 @@
#!/bin/bash
# Full pg_dump of every database (except template0) into
# ${BACKUPDIR}/postgresqldump, with retention pruning and a Checkmk batch
# status file written at the end.
# Usage: postgresql-dump-full.sh -r <retention_days> -d <backup_dir> [-c]
set -eu
DATE=$(date '+%Y%m%d %H%M%S')
TODAY=$(date '+%Y%m%d')
HOSTNAME=$(hostname -s)
STATUS=0
LOGFILE="/data/log/scripts/postgresql-dump-databases.log"
PGSQL_HOST="localhost"
PGSQL_USER="{{ postgresql_backup_user }}"
COMPRESS=false
# Initialised empty: referenced on the failure path below, and "set -u"
# would otherwise abort with "unbound variable" on the first failed dump.
OUTPUT=""
touch ${LOGFILE}
#
# Functions
#
# Abort unless the NAS marker file exists under the backup directory.
checkNas()
{
if [ ! -e "${BACKUPDIR}/.mount" ]; then
echo "${BACKUPDIR} not mounted. Backup aborted." | tee -a ${LOGFILE}
exit 1
fi
}
usage()
{
echo "$0 -r <retention> -d <repertoire> -c (compression)"
echo "Exemple : /data/scripts/postgresql-dump-full.sh -r 20 -d /nas -c"
}
#
# Main
#
while getopts "hcr:d:" option
do
case "${option}"
in
r)
RETENTION=${OPTARG};;
d)
BACKUPDIR=${OPTARG};;
c)
COMPRESS=true;;
h | *)
usage
exit 1;;
esac
done
# Fail with a clear message when mandatory options are missing ("set -u"
# would otherwise abort with an opaque unbound-variable error).
if [ -z "${RETENTION:-}" ] || [ -z "${BACKUPDIR:-}" ]; then
usage
exit 1
fi
# Previously defined but never invoked: verify the NAS mount before writing.
checkNas
echo "Lancement du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}
mkdir -p "$BACKUPDIR"/postgresqldump/ | tee -a ${LOGFILE}
find "$BACKUPDIR"/postgresqldump/ -mindepth 1 -maxdepth 1 -type f -daystart -mtime +"${RETENTION}" -delete | tee -a ${LOGFILE}
# Does not work when the backup runs on a replica
#echo "[${DATE}] - Granting superuser to ${PGSQL_USER} user" | tee -a ${LOGFILE}
#/usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH SUPERUSER'" | tee -a ${LOGFILE}
DB_LIST=$(/usr/bin/su - postgres -c "psql --csv --command 'select datname from pg_catalog.pg_database'")
for db in ${DB_LIST} ; do
if [ ! "$db" = "datname" ] && [ ! "$db" = "template0" ] ; then
echo "[${DATE}] - Dumping database : $db" | tee -a ${LOGFILE}
if [ $COMPRESS = true ] ; then
# "-Z gzip" (compression method name) needs pg_dump >= 16; older clients
# only accept an integer level — TODO confirm the deployed client version.
/usr/bin/pg_dump -Z gzip -f "${BACKUPDIR}"/postgresqldump/"${TODAY}"-"${HOSTNAME}"-"${db}".gz -U "${PGSQL_USER}" -w -h "${PGSQL_HOST}" "${db}" | tee -a ${LOGFILE}
STATUS=${PIPESTATUS[0]}
else
/usr/bin/pg_dump -f "${BACKUPDIR}"/postgresqldump/"${TODAY}"-"${HOSTNAME}"-"${db}".sql -U "${PGSQL_USER}" -w -h "${PGSQL_HOST}" "${db}" | tee -a "${LOGFILE}"
STATUS=${PIPESTATUS[0]}
fi
# PIPESTATUS[0] holds pg_dump's exit code (the pipeline's own status is
# tee's, so "set -e" does not fire on a failed dump).
if [ ! ${STATUS} -eq 0 ]; then
echo "[${DATE}][CRIT] Dump of $db failed" | tee -a ${LOGFILE}
echo "[${DATE}] - Revoking superuser from ${PGSQL_USER} user" | tee -a ${LOGFILE}
/usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH NOSUPERUSER'" | tee -a ${LOGFILE}
OUTPUT="${OUTPUT} $db"
break
fi
fi
done
# Does not work when the backup runs on a replica
#echo "[${DATE}] - Revoking superuser from ${PGSQL_USER} user" | tee -a ${LOGFILE}
#/usr/bin/su - postgres -c "psql --command 'ALTER USER ${PGSQL_USER} WITH NOSUPERUSER'" | tee -a ${LOGFILE}
# Do NOT reassign STATUS here: the old "STATUS=$?" captured the loop's tee
# status and clobbered the pg_dump exit code reported to Checkmk below.
# output in statusfile for checkmk
echo "$(date +%s)|${STATUS}|Check log file ${LOGFILE}" > /var/tmp/batch."$(basename "$0")"
echo "Fin du dump - Retention : ${RETENTION} - Repertoire : ${BACKUPDIR}" | tee -a ${LOGFILE}

View File

@@ -0,0 +1,823 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '{{ postgresql_default_data_dir }}' # use data in another directory
# (change requires restart)
#hba_file = '/etc/postgresql/16/main/pg_hba.conf' # host-based authentication file
hba_file = '{{ postgresql_default_data_dir }}/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '{{ postgresql_default_data_dir }}/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/16-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = {{ postgresql_max_connections | default("100") }} # (change requires restart)
#reserved_connections = 0 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#scram_iterations = 4096
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
#gss_accept_delegation = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
#vacuum_buffer_usage_limit = 256kB # size of vacuum and analyze buffer access strategy ring;
# 0 to disable vacuum buffer access strategy;
# range 128kB to 16GB
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a WAL file
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a WAL file
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a WAL file switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived WAL file
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_presorted_aggregate = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Europe/Paris'
# - Process Title -
cluster_name = '16/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache # cache, none, snapshot
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
#createrole_self_grant = '' # set and/or inherit
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Europe/Paris'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
#icu_validation_level = warning # report ICU locale validation
# errors at the given level
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -7,30 +7,30 @@ rabbitmq_vhosts: ""
rabbitmq_plugins: "rabbitmq_management,rabbitmq_shovel,rabbitmq_prometheus,rabbitmq_shovel_management" rabbitmq_plugins: "rabbitmq_management,rabbitmq_shovel,rabbitmq_prometheus,rabbitmq_shovel_management"
rabbitmq_collect_statistics_interval: 30000 rabbitmq_collect_statistics_interval: 30000
#rabbitmq_exchanges: # rabbitmq_exchanges:
# - name: "myapp.topic" # - name: "myapp.topic"
# type: "topic" # type: "topic"
# vhost: "myapp" # vhost: "myapp"
# #
#rabbitmq_queues: # rabbitmq_queues:
# - name: "myapp.queue" # - name: "myapp.queue"
# vhost: "myapp" # vhost: "myapp"
# dead_letter_exchange: "" # dead_letter_exchange: ""
# dead_letter_routing_key: "myapp.dlq" # dead_letter_routing_key: "myapp.dlq"
# arguments: # arguments:
# x-queue-type: quorum # x-queue-type: quorum
# #
#rabbitmq_bindings: # rabbitmq_bindings:
# - name: "myapp.topic" # - name: "myapp.topic"
# vhost: "myapp" # vhost: "myapp"
# destination: "myapp.queue" # destination: "myapp.queue"
# destination_type: "queue" # destination_type: "queue"
# routing_key: "*" # routing_key: "*"
# #
#rabbitmq_app_users: # rabbitmq_app_users:
# - username: "myapp" # - username: "myapp"
# password: "{{ lookup('community.hashi_vault.hashi_vault','ansible/data/rabbitmq/{{ env }}/users/myapp:password') }}" # password: "{{ lookup('community.hashi_vault.hashi_vault','ansible/data/rabbitmq/{{ env }}/users/myapp:password') }}"
# vhost: "myapp" # vhost: "myapp"
# read_priv: "^myapp.*" # read_priv: "^myapp.*"
# write_priv: "^myapp.*" # write_priv: "^myapp.*"
# configure_priv: "^$" # configure_priv: "^$"

View File

@@ -4,7 +4,7 @@
ansible.builtin.template: ansible.builtin.template:
src: rabbitmq.conf.j2 src: rabbitmq.conf.j2
dest: /etc/rabbitmq/rabbitmq.conf dest: /etc/rabbitmq/rabbitmq.conf
mode: 0644 mode: "0644"
owner: rabbitmq owner: rabbitmq
group: rabbitmq group: rabbitmq
notify: Restart Rabbitmq notify: Restart Rabbitmq
@@ -14,7 +14,7 @@
ansible.builtin.template: ansible.builtin.template:
src: rabbitmq-env.conf.j2 src: rabbitmq-env.conf.j2
dest: /etc/rabbitmq/rabbitmq-env.conf dest: /etc/rabbitmq/rabbitmq-env.conf
mode: 0644 mode: "0644"
owner: rabbitmq owner: rabbitmq
group: rabbitmq group: rabbitmq
notify: Restart Rabbitmq notify: Restart Rabbitmq
@@ -79,7 +79,7 @@
community.rabbitmq.rabbitmq_queue: community.rabbitmq.rabbitmq_queue:
login_user: "{{ rabbitmq_admin_username }}" login_user: "{{ rabbitmq_admin_username }}"
login_password: "{{ rabbitmq_admin_password }}" login_password: "{{ rabbitmq_admin_password }}"
state: "{{ item.state | default('present')}}" state: "{{ item.state | default('present') }}"
vhost: "{{ item.vhost }}" vhost: "{{ item.vhost }}"
name: "{{ item.name }}" name: "{{ item.name }}"
durable: "{{ item.durable | default(true) }}" durable: "{{ item.durable | default(true) }}"
@@ -93,7 +93,7 @@
community.rabbitmq.rabbitmq_binding: community.rabbitmq.rabbitmq_binding:
login_user: "{{ rabbitmq_admin_username }}" login_user: "{{ rabbitmq_admin_username }}"
login_password: "{{ rabbitmq_admin_password }}" login_password: "{{ rabbitmq_admin_password }}"
state: "{{ item.state |default('present') }}" state: "{{ item.state | default('present') }}"
vhost: "{{ item.vhost }}" vhost: "{{ item.vhost }}"
name: "{{ item.name }}" name: "{{ item.name }}"
destination: "{{ item.destination }}" destination: "{{ item.destination }}"

View File

@@ -15,7 +15,7 @@
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://dl.cloudsmith.io/public/rabbitmq/rabbitmq-server/gpg.9F4587F226208342.key" url: "https://dl.cloudsmith.io/public/rabbitmq/rabbitmq-server/gpg.9F4587F226208342.key"
dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.asc" dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.asc"
mode: 0644 mode: "0644"
force: true force: true
tags: install tags: install
@@ -23,7 +23,7 @@
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-erlang.E495BB49CC4BBE5B.key" url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-erlang.E495BB49CC4BBE5B.key"
dest: "/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg" dest: "/usr/share/keyrings/rabbitmq.E495BB49CC4BBE5B.gpg"
mode: 0644 mode: "0644"
force: true force: true
tags: install tags: install
@@ -31,7 +31,7 @@
ansible.builtin.get_url: ansible.builtin.get_url:
url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-server.9F4587F226208342.key" url: "https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-server.9F4587F226208342.key"
dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg" dest: "/usr/share/keyrings/rabbitmq.9F4587F226208342.gpg"
mode: 0644 mode: "0644"
force: true force: true
tags: install tags: install
@@ -95,14 +95,14 @@
state: directory state: directory
owner: root owner: root
group: root group: root
mode: 0755 mode: "0755"
tags: install tags: install
- name: Set LimitNOFILE - name: Set LimitNOFILE
ansible.builtin.copy: ansible.builtin.copy:
src: limits.conf src: limits.conf
dest: "/etc/systemd/system/rabbitmq-server.service.d/limits.conf" dest: "/etc/systemd/system/rabbitmq-server.service.d/limits.conf"
mode: 0644 mode: "0644"
owner: root owner: root
group: root group: root
tags: install tags: install
@@ -116,7 +116,7 @@
dest: /var/lib/rabbitmq/.erlang.cookie dest: /var/lib/rabbitmq/.erlang.cookie
owner: rabbitmq owner: rabbitmq
group: rabbitmq group: rabbitmq
mode: 0400 mode: "0400"
tags: install tags: install
notify: Restart Rabbitmq notify: Restart Rabbitmq
@@ -130,7 +130,7 @@
ansible.builtin.template: ansible.builtin.template:
src: rabbitmq.conf.j2 src: rabbitmq.conf.j2
dest: /etc/rabbitmq/rabbitmq.conf dest: /etc/rabbitmq/rabbitmq.conf
mode: 0644 mode: "0644"
owner: rabbitmq owner: rabbitmq
group: rabbitmq group: rabbitmq
notify: Restart Rabbitmq notify: Restart Rabbitmq
@@ -140,7 +140,7 @@
ansible.builtin.template: ansible.builtin.template:
src: rabbitmq-env.conf.j2 src: rabbitmq-env.conf.j2
dest: /etc/rabbitmq/rabbitmq-env.conf dest: /etc/rabbitmq/rabbitmq-env.conf
mode: 0644 mode: "0644"
owner: rabbitmq owner: rabbitmq
group: rabbitmq group: rabbitmq
notify: Restart Rabbitmq notify: Restart Rabbitmq
@@ -150,7 +150,7 @@
ansible.builtin.copy: ansible.builtin.copy:
src: logrotate src: logrotate
dest: /etc/logrotate.d/rabbitmq-server dest: /etc/logrotate.d/rabbitmq-server
mode: 0644 mode: "0644"
user: root user: root
group: root group: root
tags: install tags: install

View File

@@ -0,0 +1,3 @@
---
ssh_host_keys_keys: ''

View File

@@ -0,0 +1,22 @@
---
- name: Reconfigure sshd
ansible.builtin.command: dpkg-reconfigure openssh-server
changed_when: true
- name: Restart sshd
ansible.builtin.service:
name: ssh
state: restarted
- name: Removing ssh host public and private keys
ansible.builtin.file:
path: "{{ item.1 }}"
state: absent
register: state_pub
with_subelements:
- "{{ ssh_host_keys_keys }}"
- files
notify:
- Reconfigure sshd
- Restart sshd

View File

@@ -0,0 +1,20 @@
---
- name: Set specific variables for distributions
ansible.builtin.include_vars: '{{ item }}'
with_first_found:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- '{{ ansible_distribution }}.yml'
- '{{ ansible_os_family }}.yml'
- default.yml
- name: Checking if host keys have been changed
ansible.builtin.lineinfile:
line: "{{ item.pub_key }}"
dest: "{{ item.files.0 }}"
state: absent
check_mode: true
with_items: "{{ ssh_host_keys_keys }}"
notify: Removing ssh host public and private keys

View File

@@ -0,0 +1,18 @@
---
ssh_host_keys_keys:
- type: ssh_rsa
pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDK+gk7loCySamkbnp4MseVMlNpv03AuxKKW/HSGy5a1BigD7ZcFXmAybY4gbpEe8IHZkBee/5I9DIEh827H6hbL/rhvtx172Vp3tsuPUgDtFJZJqZH4mMKyO713K3qzldBN4zbMUytIy6acFzNiPksAtwib6CtBFcfTreeRrVhaut/B1T9MK38MARXa6g3oNtqSgHOwKGbYHWsQy1Ekav+KosALlyYSVs4d0ioWbO74/hir+Of/5bJpqzddhaYUCxsvmdmBGQtj6n4pQBPuWVWY8a7DN1Js2/TNfgyi++9WH16/sGxbOMPpQmMVvJ9Zg0DhOuOFERAZdTfoRjVi2QOEH1nMaQTocQnd9Oy53XoshAsNYn9TWywzGe6fjw8qzpp+a3ko6hkmYS2/mvIe8gL0suBy1bJpaze9JIX/RIxbQa7GpD0PApabgU4eO/gZZpNAeyFYsFaxNtyuFwUIsClyyPX8kccQ2ufLJkNLC7Ix1bdKGkd2vtUBs1Q31xELUM= root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_rsa_key.pub
- /etc/ssh/ssh_host_rsa_key
- type: ssh_ecdsa
pub_key: "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPciQgbaKkLubW/I3mCTsc7L1Dy+2zMvPw045332vVUmPeKm6t7xAu7nzUnjhUOxFK4qev1wgnOhPibwXQrxHQ4= root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_ecdsa_key.pub
- /etc/ssh/ssh_host_ecdsa_key
- type: ssh_ed25519
pub_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKIRtv0xKbDGS7G2rz9RQWnk/OSizkimy8WFBhokqxXR root@debian12-tpl001-lan"
files:
- /etc/ssh/ssh_host_ed25519_key.pub
- /etc/ssh/ssh_host_ed25519_key

View File

@@ -0,0 +1,13 @@
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
line-length: disable
# NOTE(retr0h): Templates no longer fail this lint rule.
# Uncomment if running old Molecule templates.
# truthy: disable

View File

@@ -0,0 +1,81 @@
# Unbound
This role installs and configures an Unbound resolver.
It also installs a Prometheus exporter compiled from [letsencrypt/unbound_exporter](https://github.com/letsencrypt/unbound_exporter)
## Targets
- Debian
## Role variables
- ``unbound_interfaces``: list of interfaces Unbound has to listen on. If not specified, Unbound will listen on 0.0.0.0.
- ``unbound_authorized_cidrs``: list of authorized CIDRs to query the resolver. As Unbound rejects everything by default, if none is set, the resolver won't answer anyone.
- ``unbound_threads``: number of threads Unbound runs on. (default: 1)
- ``unbound_cache_size``: size of Unbound cache, in Mb. (default: 100)
- ``unbound_zones``: dictionary about zones that need to be forwarded to another DNS server. It contains info for every managed zone:
``name``: name of the zone
``forward_ip``: list of the servers to forward queries to
``private``: boolean, has to be specified for dummy zones (ex: .priv). It disables DNSSEC validation for those zones.
Zones that are not explicitly specified in forwards will be forwarded to root servers.
## Prometheus exporter
* For the exporter to work properly you need to run the following command on each resolver :
```
unbound-control-setup
```
* You also need to ensure that the "extended-statistics: yes" directive is in the conf (it is here).
* The exporter configuration can be changed by modifying the systemd service template.
## Unbound logging
In order to enable query log, you need to do the following :
* Add the following directives to the config :
```
logfile: "/var/log/unbound/unbound.log"
log-time-ascii: yes
log-queries: yes
log-replies: yes # will log information about the reply, slows response time.
```
* Add the following line in /etc/apparmor.d/usr.sbin.unbound (with the comma) :
```
/var/log/unbound/unbound.log rw,
```
* Run the following commands to create both directory and file for logging :
```
mkdir /var/log/unbound
touch /var/log/unbound/unbound.log
chown -R unbound:unbound /var/log/unbound
apparmor_parser -r /etc/apparmor.d/usr.sbin.unbound
```
* Restart unbound.
## Example
In this example, we specify to forward queries for domain aaa.com to xxx.xxx.xxx.xxx, bbb.com to yyy.yyy.yyy.yyy or xxx.xxx.xxx.xxx as a failover, and requests for a private zone to zzz.zzz.zzz.zzz :
```yml
unbound_interfaces:
- "aaa.aaa.aaa.aaa"
unbound_authorized_cidrs:
- "aaa.aaa.aaa.0/24"
- "bbb.bbb.bbb.bbb/32"
unbound_threads: 2
unbound_cache_size: 1536
unbound_zones:
- name: "aaa.com"
forward_ip:
- xxx.xxx.xxx.xxx
- name: "bbb.com"
forward_ip:
- yyy.yyy.yyy.yyy
- xxx.xxx.xxx.xxx
- name: "mysuperprivatezone.priv"
forward_ip:
- zzz.zzz.zzz.zzz
private: true
```

View File

@@ -0,0 +1,6 @@
---
unbound_interfaces:
- "0.0.0.0"
unbound_threads: 1
unbound_cache_size: 100
unbound_loglevel: 1

View File

@@ -0,0 +1,10 @@
/var/log/unbound/*.log {
weekly
missingok
rotate 52
compress
notifempty
postrotate
/usr/sbin/unbound-control log_reopen
endscript
}

Binary file not shown.

View File

@@ -0,0 +1,15 @@
---
- name: Daemon reload
ansible.builtin.systemd_service:
daemon_reload: true
- name: Restart unbound exporter
ansible.builtin.systemd_service:
name: unbound_exporter
state: restarted
- name: Reload Unbound
ansible.builtin.systemd_service:
name: unbound
state: reloaded

View File

@@ -0,0 +1,76 @@
---
- name: Set specific variables for distributions
ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- files:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml' # CentOS-6.5
- '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml' # RedHat-6.5
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' # CentOS-6
- '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml' # RedHat-6
- '{{ ansible_distribution }}.yml' # CentOS
- '{{ ansible_os_family }}.yml' # RedHat
- 'default.yml'
- name: Enhance socket buffer size in UDP
ansible.posix.sysctl:
name: "{{ item }}"
value: 4194304
reload: true
with_items:
- "net.core.rmem_max"
- "net.core.wmem_max"
- name: Install Unbound
ansible.builtin.apt:
name: "{{ unbound_package }}"
update_cache: true
state: present
when: ansible_os_family == "Debian"
- name: Setup service configuration
ansible.builtin.template:
src: unbound.conf.j2
dest: /etc/unbound/unbound.conf.d/custom.conf
owner: unbound
group: unbound
mode: "0755"
notify: Reload Unbound
- name: Set permission on conf directory
ansible.builtin.file:
path: /etc/unbound
owner: unbound
group: unbound
recurse: true
- name: Ensure service is enabled at boot and started
ansible.builtin.systemd_service:
name: "unbound"
enabled: true
state: started
- name: Deploy unbound exporter
ansible.builtin.copy:
src: unbound_exporter
dest: /usr/local/bin/unbound_exporter
mode: "0755"
- name: Deploy unbound exporter service
ansible.builtin.template:
src: unbound_exporter.service.j2
dest: /etc/systemd/system/unbound_exporter.service
owner: root
group: root
mode: "0644"
notify:
- Daemon reload
- Restart unbound exporter
- name: Deploy logrotate config file
ansible.builtin.copy:
src: logrotate
dest: /etc/logrotate.d/unbound
owner: root
group: root
mode: "0644"

View File

@@ -0,0 +1,58 @@
## {{ ansible_managed }}
server:
verbosity: {{ unbound_loglevel }}
extended-statistics: yes
do-udp: yes
do-tcp: yes
do-ip6: no
num-threads: {{ unbound_threads }}
msg-cache-slabs: {{ unbound_threads }}
rrset-cache-slabs: {{ unbound_threads }}
infra-cache-slabs: {{ unbound_threads }}
key-cache-slabs: {{ unbound_threads }}
rrset-cache-size: {{ unbound_cache_size }}m
key-cache-size: {{ ((unbound_cache_size/2) | int) }}m
msg-cache-size: {{ ((unbound_cache_size/2) | int) }}m
neg-cache-size: {{ ((unbound_cache_size/4) | int) }}m
prefetch: yes
cache-min-ttl: 300
cache-max-ttl: 86400
outgoing-range: 8192
num-queries-per-thread: 4096
so-rcvbuf: 4m
so-sndbuf: 4m
so-reuseport: yes
rrset-roundrobin: yes
val-log-level: 1
{% for iface in unbound_interfaces %}
interface: {{ iface }}
{% endfor %}
{% for cidr in unbound_authorized_cidrs %}
access-control: {{ cidr }} allow
{% endfor %}
{% if unbound_zones is defined %}
{% for zone in unbound_zones %}
{% if zone.private is defined and zone.private %}
domain-insecure: "{{ zone.name }}"
{% endif %}
{% endfor %}
{% for zone in unbound_zones %}
forward-zone:
name: "{{ zone.name }}"
{% for fwa in zone.forward_ip %}
forward-addr: {{ fwa }}
{% endfor -%}
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,13 @@
[Unit]
Description=Unbound exporter for prometheus
Documentation=https://github.com/letsencrypt/unbound_exporter
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
ExecStart=/usr/local/bin/unbound_exporter -unbound.host="unix:///run/unbound.ctl"
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,3 @@
---
unbound_package: "unbound"

View File

@@ -9,7 +9,7 @@ varnish_acl_purge_hosts:
varnish_health_check: "/healthcheck$" varnish_health_check: "/healthcheck$"
varnishncsa_custom_items: varnish_varnishncsa_custom_items:
- domain1 - domain1
- domain2 - domain2
- domain3 - domain3

View File

@@ -1,22 +1,22 @@
--- ---
- name: rsyslogd restart - name: Rsyslogd restart
ansible.builtin.systemd: ansible.builtin.systemd:
name: rsyslog name: rsyslog
state: restarted state: restarted
- name: varnish reload - name: Varnish reload
ansible.builtin.systemd: ansible.builtin.systemd:
name: varnish name: varnish
state: reloaded state: reloaded
- name: varnish restart - name: Varnish restart
ansible.builtin.systemd: ansible.builtin.systemd:
name: varnish name: varnish
daemon_reload: true daemon_reload: true
state: restarted state: restarted
- name: varnishncsa restart - name: Varnishncsa restart
ansible.builtin.systemd: ansible.builtin.systemd:
name: "varnishncsa-{{ item }}" name: "varnishncsa-{{ item }}"
daemon_reload: true daemon_reload: true

View File

@@ -1,19 +1,19 @@
--- ---
- name: install varnish package - name: Install varnish package
ansible.builtin.apt: ansible.builtin.apt:
name: varnish name: varnish
state: present state: present
update_cache: true update_cache: true
tags: install tags: install
- name: hold packages - name: Hold packages
ansible.builtin.dpkg_selections: ansible.builtin.dpkg_selections:
name: "varnish" name: "varnish"
selection: hold selection: hold
tags: install tags: install
- name: disabled default varnish/varnishcsa service - name: Disabled default varnish/varnishcsa service
ansible.builtin.systemd: ansible.builtin.systemd:
name: "{{ item }}" name: "{{ item }}"
enabled: false enabled: false
@@ -23,95 +23,95 @@
- varnishncsa - varnishncsa
tags: install tags: install
- name: ensure ipv4 takes precedence - name: Ensure ipv4 takes precedence
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: /etc/gai.conf path: /etc/gai.conf
line: precedence ::ffff:0:0/96 100 line: precedence ::ffff:0:0/96 100
tags: install tags: install
- name: deploy rsyslogd conf - name: Deploy rsyslogd conf
ansible.builtin.copy: ansible.builtin.copy:
src: rsyslog.conf src: rsyslog.conf
dest: /etc/rsyslog.d/10-varnishd.conf dest: /etc/rsyslog.d/10-varnishd.conf
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install tags: install
notify: rsyslogd restart notify: Rsyslogd restart
- name: deploy logrotate conf - name: Deploy logrotate conf
ansible.builtin.copy: ansible.builtin.copy:
src: logrotate.conf src: logrotate.conf
dest: /etc/logrotate.d/varnishd dest: /etc/logrotate.d/varnishd
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install tags: install
- name: deploy varnishncsa logrotate conf - name: Deploy varnishncsa logrotate conf
ansible.builtin.copy: ansible.builtin.copy:
src: logrotatencsa.conf src: logrotatencsa.conf
dest: /etc/logrotate.d/varnishncsa dest: /etc/logrotate.d/varnishncsa
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install tags: install
- name: create varnishncsa log dir - name: Create varnishncsa log dir
ansible.builtin.file: ansible.builtin.file:
path: /data/log/web/ path: /data/log/web/
state: directory state: directory
owner: varnishlog owner: varnishlog
group: varnish group: varnish
mode: 0750 mode: "0750"
tags: install tags: install
- name: deploy custom varnish systemd service file - name: Deploy custom varnish systemd service file
ansible.builtin.template: ansible.builtin.template:
src: varnish.service.j2 src: varnish.service.j2
dest: /etc/systemd/system/varnish.service dest: /etc/systemd/system/varnish.service
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install,config tags: install,config
notify: varnish restart notify: Varnish restart
- name: deploy custom varnishncsa systemd service file - name: Deploy custom varnishncsa systemd service file
ansible.builtin.template: ansible.builtin.template:
src: "{{ env }}-varnishncsa-{{ item }}.service.j2" src: "{{ env }}-varnishncsa-{{ item }}.service.j2"
dest: "/etc/systemd/system/varnishncsa-{{ item }}.service" dest: "/etc/systemd/system/varnishncsa-{{ item }}.service"
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install,config tags: install,config
with_items: "{{ varnishncsa_custom_items }}" with_items: "{{ varnish_varnishncsa_custom_items }}"
notify: varnishncsa restart notify: Varnishncsa restart
- name: enabled custom varnish systemd service - name: Enabled custom varnish systemd service
ansible.builtin.systemd: ansible.builtin.systemd:
name: varnish name: varnish
enabled: true enabled: true
tags: install tags: install
- name: start varnish on install - name: Start varnish on install
ansible.builtin.systemd: ansible.builtin.systemd:
name: varnish name: varnish
state: started state: started
tags: install tags: install
- name: enabled custom varnishncsa services - name: Enabled custom varnishncsa services
ansible.builtin.systemd: ansible.builtin.systemd:
name: "varnishncsa-{{ item }}" name: "varnishncsa-{{ item }}"
enabled: true enabled: true
with_items: "{{ varnishncsa_custom_items }}" with_items: "{{ varnish_varnishncsa_custom_items }}"
tags: install tags: install
- name: deploy varnish config file - name: Deploy varnish config file
ansible.builtin.template: ansible.builtin.template:
src: default.vcl.j2 src: default.vcl.j2
dest: /etc/varnish/default.vcl dest: /etc/varnish/default.vcl
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
tags: install,config tags: install,config
notify: varnish reload notify: Varnish reload

View File

@@ -8,7 +8,7 @@
- name: Pause to let the operator step-down master is needed - name: Pause to let the operator step-down master is needed
ansible.builtin.pause: ansible.builtin.pause:
prompt: "Run vault operator step-down if needed" prompt: "Run vault operator step-down if needed"
echo: no echo: false
when: "'<MasterNodeName>' in inventory_hostname" when: "'<MasterNodeName>' in inventory_hostname"
- name: Stopping vault service - name: Stopping vault service
@@ -20,7 +20,7 @@
ansible.builtin.apt: ansible.builtin.apt:
name: vault name: vault
update_cache: true update_cache: true
state: latest state: present
- name: Starting vault service - name: Starting vault service
ansible.builtin.systemd_service: ansible.builtin.systemd_service:
@@ -35,4 +35,4 @@
- name: Pause to unseal vault - name: Pause to unseal vault
ansible.builtin.pause: ansible.builtin.pause:
prompt: "Go to vault interface and unseal the node : https://{{ inventory_hostname }}:8200" prompt: "Go to vault interface and unseal the node : https://{{ inventory_hostname }}:8200"
echo: no echo: false

View File

@@ -0,0 +1,15 @@
---
vim_plugins:
- url: "https://github.com/jvirtanen/vim-hcl.git"
dest: "vim-hcl"
- url: "https://github.com/fatih/vim-go.git"
dest: "vim-go"
- url: "https://github.com/vim-airline/vim-airline.git"
dest: "vim-airline"
- url: "https://tpope.io/vim/fugitive.git"
dest: "fugitive"
- url: "https://github.com/dense-analysis/ale"
dest: "ale"
- url: "https://github.com/preservim/nerdtree"
dest: "nerdtree"

View File

@@ -0,0 +1,55 @@
" ansible managed
" We don't care about vi
set nocompatible
" Syntax color
syntax on
" Read/write a .viminfo file, don't store more than 50 lines of registers
set viminfo='20,\"50
" Keep 50 lines of command line history
set history=50
" Disable auto-indent
set noautoindent
" Show line and column number of cursor
set ruler
" Search is case sensistive only if there is uppercase in the pattern
set smartcase
" Disable bell and use visualbell
set noerrorbells
set visualbell
" Highlight search match
set hlsearch
" Always display a status line
set laststatus=2
" Use spaces when using the tab key
set expandtab
" Use 4 spaces
set shiftwidth=4
set softtabstop=4
" Restore cursor position
autocmd BufReadPost * if line("'\"") && line("'\"") <= line("$") | exe "normal `\"" | endif
" Disable automatic visual mode
set mouse-=a
" show line numbers
set nu
" set color for white background
:color desert
:highlight ws ctermbg=red guibg=red
:match ws /\s\+$/
set statusline+=%#warningmsg#
set statusline+=%{SyntasticStatuslineFlag()}
set statusline+=%*
let g:syntastic_always_populate_loc_list = 1
let g:syntastic_auto_loc_list = 1
let g:syntastic_check_on_open = 1
let g:syntastic_check_on_wq = 0
let g:ale_fixers = {
\ '*': ['remove_trailing_lines','trim_whitespace'],
\ 'yaml': ['yamllint'],
\ 'python': ['pylint'],
\}
filetype plugin indent on

View File

@@ -0,0 +1,34 @@
---
# Install vim + git, deploy the managed vimrc, and clone the plugins
# listed in vim_plugins into the native vim8 package directory.
# (Mapping nesting reconstructed: the source paste had lost indentation.)
- name: install vim and requirements for role
  ansible.builtin.apt:
    # Pass the list directly: one apt transaction instead of one per item.
    name:
      - vim
      - git
    state: present
  remote_user: "{{ app_user | default('root') }}"
  become: true

- name: deploy vimrc
  ansible.builtin.copy:
    src: vimrc
    dest: "{{ vim_dir | default('/root/') }}/.vimrc"
    owner: "{{ app_user | default('root') }}"
    group: "{{ vim_group | default('root') }}"
    # Quote octal modes: unquoted 0644 is parsed as the integer 420.
    mode: "0644"
  remote_user: "{{ app_user | default('root') }}"

- name: ensure plugins and colors folder exists
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    # Explicit mode avoids ansible-lint risky-file-permissions.
    mode: "0755"
  with_items:
    - "{{ vim_plugins_dir | default('/root/.vim/pack/plugins/start') }}"

- name: download and install vim plugins
  ansible.builtin.git:
    repo: "{{ item.url }}"
    dest: "{{ vim_plugins_dir | default('/root/.vim/pack/plugins/start') }}/{{ item.dest }}"
  remote_user: "{{ app_user | default('root') }}"
  loop: "{{ vim_plugins }}"

View File

@@ -0,0 +1,4 @@
---
# Role defaults: package to install and where native vim8 packages live.
vim_package: vim
vim_plugins_dir: "~/.vim/pack/plugins/start"

View File

@@ -0,0 +1,4 @@
---
# OpenBSD overrides: the no-X11 vim package flavor; root's group is wheel.
vim_package: vim--no_x11
vim_root_group: wheel

39
argocd/tips.md Normal file
View File

@@ -0,0 +1,39 @@
## ArgoCD config
### Create a Kubernetes Secret storing a git token
- This token will be used for every repo under the /kirby path
```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: private-repo-creds
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repo-creds
stringData:
  type: git
  url: https://git.hyrule.ovh/kirby
  password: myToken
  username: kirby
```
### Create an app of Apps stored in a repo
```
argocd app create apps --dest-namespace argocd --dest-server https://kubernetes.default.svc --repo https://git.hyrule.ovh/kirby/argocd-conf.git --path apps
argocd app sync apps
```
### Service health stuck Progressing
* TLDR: if the service type is LoadBalancer and the IngressController does not provide the status.loadBalancer.ip field, the app never turns Healthy. Fix: change the healthcheck behavior
```yaml
kubectl edit configmap argocd-cm -n argocd
data:
  resource.customizations: |
    Service:
      health.lua: |
        hs = {}
        hs.status = "Healthy"
        return hs
```

21
bash/script.md Normal file
View File

@@ -0,0 +1,21 @@
## Useful functions
### Bash trap
- https://tldp.org/LDP/Bash-Beginners-Guide/html/sect_12_02.html
```bash
```bash
# Define the function called before exiting the script after an error is caught.
function set_error_status() {
    echo "[$(date '+%Y%m%d %H%M%S')] : Something went wrong in the script, exiting." | tee -a "${LOGFILE}"
    echo "2 vault-snapshot-restore - KO" > "${STATUSFILE}"
}
# Set the function called when the ERR signal is caught.
trap set_error_status ERR
```
### Exit immediately on error and when variables are empty
- https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
```bash
set -eu
```

View File

@@ -0,0 +1,122 @@
---
# Base HelmRelease for http-broadcaster; environment overlays patch
# values.environment, app.image.tag, vault.path and the ingress host.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: http-broadcaster
spec:
  chart:
    spec:
      chart: myChart
      reconcileStrategy: ChartVersion
      version: "2.x.x"
      sourceRef:
        kind: HelmRepository
        name: myChart
        namespace: infrastructure
  interval: 10m0s
  values:
    environment:  # set per environment overlay ("prd", "stg")
    fullnameOverride: "http-broadcaster"
    app:
      name: "app"
      image:
        repository: gitlabregistry.example.com/infrastructure/http-broadcaster
        pullPolicy: IfNotPresent
        tag:  # pinned in each overlay by Flux image automation
      labels:
        technology: "go"
        role: "api"
      resources:
        requests:
          memory: 64Mi
          cpu: 200m
        limits:
          memory: 64Mi
          cpu: 400m
      containerPort:
        broadcaster: 6081
      readinessProbe:
        exec:
          command:
            - curl
            - -f
            - -L
            - -m
            - "5"
            - "127.0.0.1:6081/healthcheck"
      livenessProbe:
        exec:
          command:
            - curl
            - -f
            - -L
            - -m
            - "5"
            - "127.0.0.1:6081/healthcheck"
      command:
        - "/bin/sh"
        - "-c"
        - "[ -f /vault/secrets/.env ] && http-broadcaster --metrics"
      lifecycle: {}
      securityContext:
        capabilities:
          drop:
            - ALL
        allowPrivilegeEscalation: false
        runAsNonRoot: true
        runAsUser: 1000
        seccompProfile:
          type: RuntimeDefault
    # Generic sidecars
    nginx:
      enabled: false
    fpmExporter:
      enabled: false
    elasticAgent:
      enabled: false
    logrotate:
      enabled: false
    imagePullSecrets:
      - name: registry-credentials
    vault:
      enabled: true
    serviceAccount:
      create: true
    service:
      extraPorts:
        - name: web
          port: 6081
          targetPort: 6081
    ingress:
      enabled: true
      autoIngress:
        enabled: true
        path: "/"
        port: 6081
    replicaCount: 2
    autoscaling:
      enabled: false
      minReplicas: 1
      maxReplicas: 1
      targetCPUUtilizationPercentage: 40

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Base entry point for the app: pulls in the HelmRelease manifest.
resources:
- hr-http-broadcaster.yaml

View File

@@ -0,0 +1,59 @@
---
# Grafana k6-operator, scoped to the infrastructure namespace.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: k6-operator
spec:
  chart:
    spec:
      # https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/values.yaml
      chart: k6-operator
      reconcileStrategy: ChartVersion
      version: "3.x.x"
      sourceRef:
        kind: HelmRepository
        name: grafana
        namespace: infrastructure
  interval: 10m0s
  values:
    fullnameOverride: "k6-operator"
    customLabels:
      team: "infrastructure"
    manager:
      replicas: 1
      serviceAccount:
        name: "k6-operator-controller"
        create: true
      image:
        registry: ghcr.io
        repository: grafana/k6-operator
        tag: controller-v0.0.19
        pullPolicy: IfNotPresent
      livenessProbe: {}
      readinessProbe: {}
      resources:
        limits:
          cpu: 100m
          memory: 100Mi
        requests:
          cpu: 100m
          memory: 100Mi
      env:
        # Only the infrastructure namespace is watched.
        - name: WATCH_NAMESPACE
          value: 'infrastructure'
    authProxy:
      # Disable/Enable authentication on metric endpoint
      enabled: false
    installCRDs: true
    namespace:
      create: false
    prometheus:
      enabled: false

View File

@@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Pulls in the k6-operator HelmRelease.
resources:
- hr-k6-operator.yaml

View File

@@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Aggregates the remote (third-party) and system (in-house) app trees.
resources:
- remote
- system

View File

@@ -0,0 +1,31 @@
---
# Production overlay patch for the http-broadcaster HelmRelease.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: http-broadcaster
spec:
  chart:
    spec:
      chart: myChart
      reconcileStrategy: ChartVersion
      version: "2.x.x"
      sourceRef:
        kind: HelmRepository
        name: myChart
        namespace: infrastructure
  interval: 10m0s
  values:
    environment: "prd"
    app:
      image:
        # Tag is rewritten by Flux image automation via the marker below.
        tag: 2.1.5 # {"$imagepolicy": "system:http-broadcaster:tag"}
    autoscaling:
      enabled: false
    vault:
      path: "app/data/http-broadcaster/prd/envVars"
    ingress:
      autoIngress:
        host: "purgatory.example.com"
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true

View File

@@ -0,0 +1,6 @@
# Production overlay: base app plus the environment-specific HR patch.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../base/http-broadcaster/app
patches:
- path: hr-http-broadcaster.yaml

View File

@@ -0,0 +1,28 @@
---
# Flux automation: commits tag bumps for production back to main.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageUpdateAutomation
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  git:
    checkout:
      ref:
        branch: main
    commit:
      author:
        email: fluxcdbot@example.com
        name: fluxcdbot
      messageTemplate: 'deps(apps/production): {{ .AutomationObject }} ImageUpdateAutomation'
    push:
      branch: main
  interval: 3m0s
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system
  update:
    # Only setter markers under this path are rewritten.
    path: ./apps/production/system/http-broadcaster
    strategy: Setters

View File

@@ -0,0 +1,6 @@
# Image-automation resources for the production overlay.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- iua-http-broadcaster.yaml
- policy-http-broadcaster.yaml
- registry-http-broadcaster.yaml

View File

@@ -0,0 +1,15 @@
---
# Production tracks stable releases only (x.y.z, optional leading v).
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImagePolicy
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  imageRepositoryRef:
    name: http-broadcaster
  filterTags:
    pattern: '^v?(?P<rv>[0-9]+\.[0-9]+\.[0-9]+)$'
    extract: '$rv'
  policy:
    semver:
      range: '^2.x.x-0'

View File

@@ -0,0 +1,13 @@
---
# Scans the registry for new http-broadcaster tags; _dev tags are ignored.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImageRepository
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  secretRef:
    name: registry-credentials
  image: gitlabregistry.example.com/infrastructure/http-broadcaster
  interval: 5m0s
  exclusionList:
    - "_dev$"

View File

@@ -0,0 +1,6 @@
# System-team apps for production, deployed into the system namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: system
resources:
- app
- automations

View File

@@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Index of system apps in this environment.
resources:
- http-broadcaster

View File

@@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Remote (third-party) apps shared via the base k6-operator definition.
resources:
- ../../k6-operator

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Staging aggregates remote, system and infrastructure app trees.
resources:
- remote
- system
- infrastructure

View File

@@ -0,0 +1,37 @@
---
# Staging overlay patch for the http-broadcaster HelmRelease.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: http-broadcaster
spec:
  chart:
    spec:
      chart: myChart
      reconcileStrategy: ChartVersion
      version: "2.x.x"
      sourceRef:
        kind: HelmRepository
        name: myChart
        namespace: infrastructure
  interval: 10m0s
  values:
    environment: "stg"
    app:
      image:
        # Tag is rewritten by Flux image automation via the marker below.
        tag: 2.2.0-rc.1 # {"$imagepolicy": "system:http-broadcaster:tag"}
    autoscaling:
      enabled: false
    vault:
      path: "app/data/http-broadcaster/stg/envVars"
    # Test to verify that Traefik is not rejecting requests for being
    # outside the size limit.
    ingress:
      middlewares:
        buffering:
          enabled: false
      autoIngress:
        host: "purgatory.stg.example.com"
    # Tell the chart that we want the service to be scraped.
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true

View File

@@ -0,0 +1,6 @@
# Staging overlay: base app plus the environment-specific HR patch.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../../base/http-broadcaster/app
patches:
- path: hr-http-broadcaster.yaml

View File

@@ -0,0 +1,28 @@
---
# Flux automation: commits tag bumps for staging back to main.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta2
kind: ImageUpdateAutomation
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  git:
    checkout:
      ref:
        branch: main
    commit:
      author:
        email: fluxcdbot@example.com
        name: fluxcdbot
      messageTemplate: 'deps(apps/staging): {{ .AutomationObject }} ImageUpdateAutomation'
    push:
      branch: main
  interval: 3m0s
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system
  update:
    # Only setter markers under this path are rewritten.
    path: ./apps/staging/system/http-broadcaster
    strategy: Setters

View File

@@ -0,0 +1,7 @@
# Image-automation resources for the staging overlay (plus the webhook
# Receiver that triggers immediate scans).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- iua-http-broadcaster.yaml
- policy-http-broadcaster.yaml
- registry-http-broadcaster.yaml
- receiver-http-broadcaster.yaml

View File

@@ -0,0 +1,15 @@
---
# Staging tracks pre-release tags only (x.y.z-alpha/beta/rc.N).
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImagePolicy
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  imageRepositoryRef:
    name: http-broadcaster
  filterTags:
    pattern: '^v?(?P<rv>[0-9]+\.[0-9]+\.[0-9]+-(?:alpha|beta|rc)\.[0-9]+)$'
    extract: '$rv'
  policy:
    semver:
      range: '^2.x.x-0'

View File

@@ -0,0 +1,15 @@
---
# Generic webhook that triggers an immediate registry scan + update run.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: notification.toolkit.fluxcd.io/v1
kind: Receiver
metadata:
  name: http-broadcaster-app
spec:
  type: generic
  secretRef:
    name: webhook-receiver-token
  resources:
    - apiVersion: image.toolkit.fluxcd.io/v1beta1
      kind: ImageRepository
      # NOTE(review): the ImageRepository defined alongside this Receiver is
      # named "http-broadcaster", not "http-broadcaster-app" — confirm this
      # reference actually resolves.
      name: http-broadcaster-app
    - apiVersion: image.toolkit.fluxcd.io/v1beta2
      kind: ImageUpdateAutomation
      name: http-broadcaster

View File

@@ -0,0 +1,13 @@
---
# Scans the registry for new http-broadcaster tags; _dev tags are ignored.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: image.toolkit.fluxcd.io/v1beta1
kind: ImageRepository
metadata:
  name: http-broadcaster
  labels:
    team: system
spec:
  secretRef:
    name: registry-credentials
  image: gitlabregistry.example.com/infrastructure/http-broadcaster
  interval: 5m0s
  exclusionList:
    - "_dev$"

View File

@@ -0,0 +1,6 @@
# System-team apps for staging, deployed into the system namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: system
resources:
- app
- automations

View File

@@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Index of system apps in this environment.
resources:
- http-broadcaster

View File

@@ -0,0 +1,44 @@
---
# Cluster bootstrap: namespaces first, then infrastructure, then apps.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: namespaces
  namespace: flux-system
spec:
  interval: 1m0s
  path: ./infrastructure/base/namespaces
  # Prevent resources from being deleted when removed from the yaml file.
  prune: false
  sourceRef:
    kind: GitRepository
    name: flux-system
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: infrastructure
  namespace: flux-system
spec:
  interval: 1m0s
  path: ./infrastructure/production
  prune: true
  wait: true
  timeout: 3m0s
  sourceRef:
    kind: GitRepository
    name: flux-system
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: apps
  namespace: flux-system
spec:
  interval: 5m0s
  # Apps only reconcile once infrastructure is ready.
  dependsOn:
    - name: infrastructure
  sourceRef:
    kind: GitRepository
    name: flux-system
  path: ./apps/production
  prune: true
  wait: true

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,27 @@
# This manifest was generated by flux. DO NOT EDIT.
# (Nesting reconstructed: the source paste had lost all indentation.)
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 1m0s
  ref:
    branch: main
  secretRef:
    name: flux-system
  # NOTE(review): url is empty in the source — GitRepository requires it;
  # presumably redacted for publication. Confirm before applying.
  url:
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 10m0s
  path: ./clusters/cluster1-prd
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system

View File

@@ -0,0 +1,5 @@
# Standard flux bootstrap entry point: components + sync manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gotk-components.yaml
- gotk-sync.yaml

View File

@@ -0,0 +1,117 @@
---
# External Secrets Operator; value layout follows the upstream chart
# (controller at top level, then webhook and certController sub-trees).
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: external-secrets
spec:
  chart:
    spec:
      chart: external-secrets
      reconcileStrategy: ChartVersion
      version: "0.14.x"
      sourceRef:
        kind: HelmRepository
        name: external-secrets-operator
        namespace: infrastructure
  interval: 10m0s
  values:
    replicaCount: 3
    affinity: {}
    installCRDs: true
    crds:
      createClusterExternalSecret: true
      createClusterSecretStore: true
      createPushSecret: true
    leaderElect: true
    concurrent: 3
    serviceAccount:
      create: true
      automount: true
    rbac:
      create: true
    resources:
      requests:
        cpu: 10m
        memory: 32Mi
    serviceMonitor:
      enabled: false
      interval: 30s
      scrapeTimeout: 25s
    metrics:
      service:
        enabled: false
    podDisruptionBudget:
      enabled: true
      minAvailable: 1
      # maxUnavailable: 1
    webhook:
      create: true
      replicaCount: 1
      rbac:
        create: true
      serviceAccount:
        create: true
        automount: true
      podDisruptionBudget:
        enabled: true
        minAvailable: 1
        # maxUnavailable: 1
      serviceMonitor:
        enabled: false
        interval: 30s
        scrapeTimeout: 25s
      metrics:
        service:
          enabled: false
      resources:
        requests:
          cpu: 10m
          memory: 32Mi
      affinity: {}
    certController:
      create: true
      requeueInterval: "5m"
      replicaCount: 1
      rbac:
        create: true
      serviceAccount:
        create: true
        automount: true
      podDisruptionBudget:
        enabled: true
        minAvailable: 1
        # maxUnavailable: 1
      serviceMonitor:
        enabled: false
        interval: 30s
        scrapeTimeout: 25s
      metrics:
        service:
          enabled: false
      resources:
        requests:
          cpu: 10m
          memory: 32Mi
      affinity: {}

View File

@@ -0,0 +1,5 @@
# Core external-secrets deployment (operator + CRDs), infrastructure ns.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: infrastructure
resources:
- hr-external-secrets.yaml

View File

@@ -0,0 +1,29 @@
---
# Two-phase rollout: CRDs/operator first, then the resources that use them.
# (Nesting reconstructed: the source paste had lost all indentation.)
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: inf-external-secrets-core
  namespace: flux-system
spec:
  interval: 5m
  path: ./infrastructure/base/external-secrets/core
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: inf-external-secrets-resources
  namespace: flux-system
spec:
  # Resources wait for the operator (and its CRDs) to be ready.
  dependsOn:
    - name: inf-external-secrets-core
  interval: 5m
  path: ./infrastructure/base/external-secrets/resources
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system

View File

@@ -0,0 +1,4 @@
# Entry point: applies the two-phase dependency Kustomizations.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dependencies.yaml

Some files were not shown because too many files have changed in this diff Show More