Adding ansible section

kirby 2025-05-22 14:43:42 +02:00
parent 8c6e940433
commit 0985d65f86
26 changed files with 900 additions and 0 deletions

@@ -0,0 +1,56 @@
# Bastion deployment role
## Description
This role sets up an SSH bastion with hardened network (nftables and fail2ban) and system (ssh, sudoers) configuration.
- Deploys the users and their public keys.
- Deploys the public keys allowed to log in to the root account, restricted to connections from 10.17.0.0/16.
- Deploys the sudoers configuration so that users can switch to the root account.
- Deploys the nftables and fail2ban configuration.
- Deploys the ssh configuration.
## Variables
- private_networks: Private networks used for administration.
- allowed_networks: Networks whitelisted in fail2ban.
- admin_users: List of users allowed to log in with their public key.
- rundeck_users: Public key of rundeck_inf.
- dev_users: List of regular users to create.
- fail2ban_ignore_ips: List of IPs/networks that fail2ban should ignore.
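For reference, a minimal `group_vars/all` sketch; the networks and keys below are illustrative placeholders, not values from a real inventory:
```yaml
private_networks: "10.17.0.0/16"
allowed_networks: "10.17.0.0/16"
fail2ban_ignore_ips: "127.0.0.1/8 10.17.0.0/16"
admin_users:
  - username: alice
    public_key: "ssh-ed25519 AAAA... alice@bastion"
dev_users:
  - username: bob
    public_key: "ssh-ed25519 AAAA... bob@bastion"
    state: present  # optional, defaults to present
```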
## Installation
1. Install the machine from an existing template.
2. Disable the DHCP configuration on the private interface:
```bash
vim /etc/network/interfaces
iface <interface> inet static
    address <private_ip/netmask>
```
3. Run the playbook:
```bash
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t all -l <hostname>
```
## Usage
- Deploy a complete bastion
```
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t all
```
- Modify the firewall configuration (validated with `nft -c` before being applied; see the manual check after this list)
```
vim roles/bastion/templates/nftables.conf.j2
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t firewall
```
- Modify or add a user
```
vim group_vars/all
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t users,ssh
```
- Modify the SSH configuration (validated with `sshd -T` before being applied; see the manual check after this list)
```
vim roles/bastion/templates/sshd_config.j2
ansible-playbook -i hosts-dmz playbooks/bastion.yml -t ssh
```
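Both templates are checked at deploy time through the tasks' `validate:` options (`nft -c -f %s` and `sshd -T -f %s`). To run the same checks by hand on a deployed host:
```bash
# Dry-run the nftables ruleset without applying it
nft -c -f /etc/nftables.conf
# Parse sshd_config and print the effective settings; exits non-zero on syntax errors
sshd -T -f /etc/ssh/sshd_config
```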
## Questions/TODO
- SSH 2FA

@@ -0,0 +1,10 @@
---
private_networks: ""
allowed_networks: ""
fail2ban_ignore_ips: ""
dev_users: []
admin_users: []
rundeck_users: []
log_server: ""

@@ -0,0 +1,25 @@
---
- name: reload nftables
  ansible.builtin.systemd:
    name: nftables
    state: reloaded
- name: reload fail2ban
  ansible.builtin.systemd:
    name: fail2ban
    state: reloaded
- name: restart fail2ban
  ansible.builtin.systemd:
    name: fail2ban
    state: restarted
- name: restart ssh
  ansible.builtin.systemd:
    name: sshd
    state: restarted
- name: restart rsyslog
  ansible.builtin.systemd:
    name: rsyslog
    state: restarted

@@ -0,0 +1,44 @@
---
- name: ensure nftables service is started and enabled
  ansible.builtin.systemd:
    name: nftables
    state: started
    enabled: true
  tags: all,firewall
- name: deploying nftables configuration
  ansible.builtin.template:
    src: nftables.conf.j2
    dest: /etc/nftables.conf
    owner: root
    group: root
    mode: '0755'
    backup: true
    validate: "nft -c -f %s"
  notify:
    - reload nftables
    - restart fail2ban
  tags: all,firewall
- name: ensure fail2ban is installed
  ansible.builtin.apt:
    name: fail2ban
    state: present
    update_cache: true
  tags: all,firewall
- name: ensure fail2ban is enabled and running
  ansible.builtin.systemd:
    name: fail2ban
    state: started
    enabled: true
  tags: all,firewall
- name: deploying fail2ban ssh conf
  ansible.builtin.template:
    src: sshd.conf.j2
    dest: /etc/fail2ban/jail.d/sshd.conf
    owner: root
    group: root
    mode: '0644'
  notify: reload fail2ban
  tags: all,firewall

@@ -0,0 +1,13 @@
---
- name: user deployment related tasks
  ansible.builtin.include_tasks: users.yml
  tags: all,users,rootonly
- name: firewall deployment related tasks
  ansible.builtin.include_tasks: firewall.yml
  tags: all,firewall
- name: ssh deployment related tasks
  ansible.builtin.include_tasks: ssh.yml
  tags: all,ssh

@@ -0,0 +1,28 @@
---
- name: ensure sshd is enabled
  ansible.builtin.systemd:
    name: sshd
    enabled: true
  tags: all,ssh
- name: deploy sshd_config
  ansible.builtin.template:
    src: sshd_config.j2
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: '0644'
    validate: "sshd -T -f %s"
  notify: restart ssh
  tags: all,ssh
- name: deploy rsyslog conf
  ansible.builtin.template:
    src: auth.conf.j2
    dest: /etc/rsyslog.d/auth.conf
    owner: root
    group: root
    mode: '0644'
  notify: restart rsyslog
  tags: all,ssh

@@ -0,0 +1,53 @@
---
- name: installing sudo
  ansible.builtin.apt:
    name: sudo
    update_cache: true
    state: present
  tags: all,users
- name: adding targetpw directive for sudo
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    line: "{{ item }}"
    state: present
  with_items:
    - "Defaults targetpw"
    - "Defaults insults"
  tags: all,users
- name: creating admin users
  ansible.builtin.user:
    name: "{{ item.username }}"
    shell: /bin/bash
    groups: sudo
  with_items: "{{ admin_users }}"
  tags: all,users
- name: creating dev users
  ansible.builtin.user:
    name: "{{ item.username }}"
    shell: /bin/bash
    groups: sudo
  with_items: "{{ dev_users }}"
  tags: all,users
- name: adding authorized_keys for regular users
  ansible.builtin.authorized_key:
    user: "{{ item.username }}"
    key: "{{ item.public_key }}"
    state: "{{ item.state | default('present') }}"
  with_items:
    - "{{ admin_users }}"
    - "{{ dev_users }}"
  tags: all,users
- name: adding authorized_keys for root users
  ansible.builtin.authorized_key:
    user: "root"
    key: "{{ item.public_key }}"
    key_options: 'from="{{ private_networks }}"'
    state: "{{ item.state | default('present') }}"
  with_items: "{{ admin_users }}"
  tags: all,users,rootonly

@@ -0,0 +1,2 @@
# {{ ansible_managed }}
auth,authpriv.* @{{ log_server }}

@@ -0,0 +1,32 @@
#!/usr/sbin/nft -f
# {{ ansible_managed }}
flush ruleset
table inet filter {
    chain input {
        type filter hook input priority 0; policy drop;
        iifname "lo" accept
        tcp dport 22 accept
        icmp type echo-request accept
        # established/related connections
        ct state established,related accept
    }
    chain forward {
        type filter hook forward priority 0; policy drop;
    }
    chain output {
        type filter hook output priority 0; policy drop;
        oifname "lo" accept
        tcp dport 22 accept
        tcp dport { 80, 443, 9200 } accept
        tcp dport { 53, 123 } accept
        udp dport { 53, 123, 1514 } accept
        icmp type echo-request accept
        # established/related connections
        ct state established,related accept
    }
}

@@ -0,0 +1,12 @@
# {{ ansible_managed }}
[DEFAULT]
ignoreip = {{ fail2ban_ignore_ips }}
findtime = 3600
bantime = 86400
maxretry = 3
banaction = nftables-multiport
banaction_allports = nftables-allports
[sshd]
enabled = true

@@ -0,0 +1,95 @@
Include /etc/ssh/sshd_config.d/*.conf
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
#LoginGraceTime 2m
PermitRootLogin without-password
#StrictModes yes
#MaxAuthTries 3
#MaxSessions 10
#PubkeyAuthentication yes
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
PasswordAuthentication no
PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
ChallengeResponseAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
ClientAliveInterval 300
ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
{% for item in admin_users -%}
Match User {{ item.username }}
{% endfor %}
{% for item in dev_users -%}
Match User {{ item.username }}
{% endfor %}

@@ -0,0 +1,30 @@
# Varnish installation and configuration
## Variables
* varnish_listen_host: IP address varnish listens on. (Default: 0.0.0.0)
* varnish_listen_port: Port varnish listens on. (Default: 6081)
* varnish_maxmemory: Maximum amount of memory used by varnish. (Default: 3G)
* varnish_acl_purge_hosts: IP addresses allowed to issue PURGE requests. (Default: 127.0.0.1)
* varnish_health_check: Health-check URL of the applications; matching requests are never cached. (Default: /healthcheck$)
* varnish_backend_servers: List of backend servers, e.g.:
```
varnish_backend_servers:
  docker-hpv008-stg:
    host: "10.13.100.8"
    port: "80"
  docker-hpv009-stg:
    host: "10.13.100.9"
    port: "80"
```
## Features
* Disables the stock systemd services shipped with Varnish and Varnishncsa.
* Deploys and enables custom services for Varnish and Varnishncsa that allow the launch parameters to be customised (see the sketch after this list).
* Manages the VCL configuration.
* Deploys the logrotate and rsyslog configurations.
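As a sketch of that customisation: the launch parameters are driven by the role variables, so a group_vars override is enough to change them. The values below are illustrative:
```yaml
varnish_listen_port: 80
varnish_maxmemory: "6G"
varnishncsa_custom_items:
  - publicapi
  - privateapi
```
Each entry in `varnishncsa_custom_items` is rendered into one `varnishncsa-<item>.service` unit from the matching `{{ env }}-varnishncsa-<item>.service.j2` template.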
## Changing the configuration
```
vim roles/varnish/templates/default.vcl.j2
ansible-playbook -i hosts-stg -l varnish_stg -t config playbooks/varnish.yml
```

@@ -0,0 +1,15 @@
---
varnish_listen_host: '0.0.0.0'
varnish_listen_port: 6081
varnish_maxmemory: '3G'
varnish_acl_purge_hosts:
  - 127.0.0.1
varnish_health_check: "/healthcheck$"
varnishncsa_custom_items:
  - domain1
  - domain2
  - domain3

@@ -0,0 +1,9 @@
/var/log/varnishd.log {
    daily
    missingok
    rotate 14
    compress
    delaycompress
    notifempty
    create 0640 www-data adm
}

@@ -0,0 +1,16 @@
/data/log/web/*-access.log /data/log/web/varnishncsa.log {
    weekly
    missingok
    rotate 52
    compress
    delaycompress
    notifempty
    create 0640 varnishlog varnish
    lastaction
        systemctl restart varnishncsa-privateapi.service
        systemctl restart varnishncsa-publicapi.service
        systemctl restart varnishncsa-purge.service
        systemctl restart varnishncsa-webservices.service
        systemctl restart varnishncsa.service
    endscript
}

@@ -0,0 +1,2 @@
if $programname == 'varnishd' then /var/log/varnishd.log
&stop

@@ -0,0 +1,24 @@
---
- name: rsyslogd restart
  ansible.builtin.systemd:
    name: rsyslog
    state: restarted
- name: varnish reload
  ansible.builtin.systemd:
    name: varnish
    state: reloaded
- name: varnish restart
  ansible.builtin.systemd:
    name: varnish
    daemon_reload: true
    state: restarted
- name: varnishncsa restart
  ansible.builtin.systemd:
    name: "varnishncsa-{{ item }}"
    daemon_reload: true
    state: restarted
  with_items: "{{ varnishncsa_custom_items }}"

@@ -0,0 +1,117 @@
---
- name: install varnish package
  ansible.builtin.apt:
    name: varnish
    state: present
    update_cache: true
  tags: install
- name: hold packages
  ansible.builtin.dpkg_selections:
    name: "varnish"
    selection: hold
  tags: install
- name: disable default varnish/varnishncsa services
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: false
    state: stopped
  with_items:
    - varnish
    - varnishncsa
  tags: install
- name: ensure ipv4 takes precedence
  ansible.builtin.lineinfile:
    path: /etc/gai.conf
    line: precedence ::ffff:0:0/96 100
  tags: install
- name: deploy rsyslogd conf
  ansible.builtin.copy:
    src: rsyslog.conf
    dest: /etc/rsyslog.d/10-varnishd.conf
    owner: root
    group: root
    mode: '0644'
  tags: install
  notify: rsyslogd restart
- name: deploy logrotate conf
  ansible.builtin.copy:
    src: logrotate.conf
    dest: /etc/logrotate.d/varnishd
    owner: root
    group: root
    mode: '0644'
  tags: install
- name: deploy varnishncsa logrotate conf
  ansible.builtin.copy:
    src: logrotatencsa.conf
    dest: /etc/logrotate.d/varnishncsa
    owner: root
    group: root
    mode: '0644'
  tags: install
- name: create varnishncsa log dir
  ansible.builtin.file:
    path: /data/log/web/
    state: directory
    owner: varnishlog
    group: varnish
    mode: '0750'
  tags: install
- name: deploy custom varnish systemd service file
  ansible.builtin.template:
    src: varnish.service.j2
    dest: /etc/systemd/system/varnish.service
    owner: root
    group: root
    mode: '0644'
  tags: install,config
  notify: varnish restart
- name: deploy custom varnishncsa systemd service files
  ansible.builtin.template:
    src: "{{ env }}-varnishncsa-{{ item }}.service.j2"
    dest: "/etc/systemd/system/varnishncsa-{{ item }}.service"
    owner: root
    group: root
    mode: '0644'
  tags: install,config
  with_items: "{{ varnishncsa_custom_items }}"
  notify: varnishncsa restart
- name: enable custom varnish systemd service
  ansible.builtin.systemd:
    name: varnish
    enabled: true
  tags: install
- name: start varnish on install
  ansible.builtin.systemd:
    name: varnish
    state: started
  tags: install
- name: enable custom varnishncsa services
  ansible.builtin.systemd:
    name: "varnishncsa-{{ item }}"
    enabled: true
  with_items: "{{ varnishncsa_custom_items }}"
  tags: install
- name: deploy varnish config file
  ansible.builtin.template:
    src: default.vcl.j2
    dest: /etc/varnish/default.vcl
    owner: root
    group: root
    mode: '0644'
  tags: install,config
  notify: varnish reload

@@ -0,0 +1,165 @@
vcl 4.1;
import std;
import directors;
probe docker {
    .url = "/ping";
    .timeout = 1s;
    .interval = 5s;
    .window = 5;
    .threshold = 3;
}
{% if varnish_backend_servers is defined %}
{% for backend, value in varnish_backend_servers.items() | list %}
backend {{ backend }} {
    .host = "{{ value.host }}";
    .port = "{{ value.port }}";
    .probe = docker;
}
{% endfor %}
sub vcl_init {
    new docker_servers = directors.round_robin();
{% for backend, value in varnish_backend_servers.items() | list %}
    docker_servers.add_backend({{ backend }});
{% endfor %}
}
{% endif %}
acl purge {
{% for acl_host in varnish_acl_purge_hosts %}
    "{{ acl_host }}";
{% endfor %}
}
sub vcl_recv {
    set req.backend_hint = docker_servers.backend();
    set req.http.X-Forwarded-Port = "80";
    if (req.method == "PURGE") {
        # Check if PURGE coming from allowed purge IP
        if (client.ip !~ purge) {
            return (synth(405, "Method not allowed"));
        }
        ban("req.url ~ " + req.url);
        return (synth(200, "Purged"));
    }
    if (req.method == "BAN") {
        if (client.ip !~ purge) {
            return (synth(405, "Not allowed"));
        }
        if (req.http.X-Cache-Tags) {
            ban("obj.http.X-Cache-Tags ~ " + req.http.X-Cache-Tags);
            return (synth(200, "Banned"));
        } else if (req.http.ApiPlatform-Ban-Regex) {
            ban("obj.http.Cache-Tags ~ " + req.http.ApiPlatform-Ban-Regex);
            return (synth(200, "Banned"));
        } else {
            ban("obj.http.X-Url ~ " + req.http.X-Url);
            return (synth(200, "Banned"));
        }
    }
    if (req.method != "GET" &&
        req.method != "HEAD" &&
        req.method != "PUT" &&
        req.method != "POST" &&
        req.method != "TRACE" &&
        req.method != "OPTIONS" &&
        req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }
    # We only deal with GET and HEAD by default
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }
    # Bypass health check requests
    if (req.url ~ "{{ varnish_health_check }}") {
        return (pass);
    }
    # normalize url in case of leading HTTP scheme and domain
    set req.url = regsub(req.url, "^http[s]?://", "");
    # collect all cookies
    std.collect(req.http.Cookie);
    # Compression filter. See https://www.varnish-cache.org/trac/wiki/FAQ/Compression
    if (req.http.Accept-Encoding) {
        if (req.url ~ "\.(jpg|jpeg|png|gif|gz|tgz|bz2|tbz|mp3|ogg|swf|flv)$") {
            # No point in compressing these
            unset req.http.Accept-Encoding;
        } elsif (req.http.Accept-Encoding ~ "gzip") {
            set req.http.Accept-Encoding = "gzip";
        } elsif (req.http.Accept-Encoding ~ "deflate" && req.http.user-agent !~ "MSIE") {
            set req.http.Accept-Encoding = "deflate";
        } else {
            # unknown algorithm
            unset req.http.Accept-Encoding;
        }
    }
    return (hash);
}
sub vcl_backend_response {
    # Happens after we have read the response headers from the backend.
    #
    # Here you clean the response headers, removing silly Set-Cookie headers
    # and other mistakes your backend does.
    ## cache only successful responses and 404s
    if (beresp.status != 200 && beresp.status != 404) {
        set beresp.ttl = 0s;
        set beresp.uncacheable = true;
        return (deliver);
    } elsif (beresp.http.Cache-Control ~ "private") {
        set beresp.uncacheable = true;
        # cache the fact that the response is non-cacheable for 1 day
        set beresp.ttl = 86400s;
        return (deliver);
    }
    # validate if we need to cache it and prevent from setting cookie
    if (beresp.ttl > 0s && (bereq.method == "GET" || bereq.method == "HEAD")) {
        unset beresp.http.set-cookie;
    }
    return (deliver);
}
sub vcl_deliver {
    # Always include hit/miss information in the response
    if (resp.http.x-varnish ~ " ") {
        set resp.http.X-Cache = "HIT";
    } else {
        set resp.http.X-Cache = "MISS";
    }
    set resp.http.X-Cache-Hits = obj.hits;
    # Don't let the browser cache non-static files.
    if (resp.http.Cache-Control !~ "private") {
        set resp.http.Pragma = "no-cache";
        set resp.http.Expires = "-1";
        set resp.http.Cache-Control = "no-store, no-cache, must-revalidate, max-age=0";
    }
{% if env == "prd" %}
    # Unset a bunch of headers if we are in the prod environment
    unset resp.http.X-Powered-By;
    unset resp.http.Server;
    unset resp.http.X-Varnish;
    unset resp.http.Via;
    unset resp.http.Link;
{% endif %}
}

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain1
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain1.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain1'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain1-access.log -D -P /run/varnishncsa/varnishncsa-domain1.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain2
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain2.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain2'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain2-access.log -D -P /run/varnishncsa/varnishncsa-domain2.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain3
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain3.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain3'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain3-access.log -D -P /run/varnishncsa/varnishncsa-domain3.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain1
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain1.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain1'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain1-access.log -D -P /run/varnishncsa/varnishncsa-domain1.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain2
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain2.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain2'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain2-access.log -D -P /run/varnishncsa/varnishncsa-domain2.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,20 @@
[Unit]
Description=Varnish HTTP accelerator log daemon for domain3
Documentation=https://www.varnish-cache.org/docs/6.1/ man:varnishncsa
After=varnish.service
[Service]
Type=forking
PIDFile=/run/varnishncsa/varnishncsa-domain3.pid
RuntimeDirectory=varnishncsa
User=varnishlog
Group=varnish
ExecStart=/usr/bin/varnishncsa -q "ReqHeader:host eq 'domain3'" -F "%%{Host}i %%{X-Forwarded-For}i %%l %%u %%t \"%%m %%U%%q %%H\" %%s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" -a -w /data/log/web/domain3-access.log -D -P /run/varnishncsa/varnishncsa-domain3.pid
ExecReload=/bin/kill -HUP $MAINPID
PrivateDevices=true
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,32 @@
[Unit]
Description=Varnish Cache, a high-performance HTTP accelerator
Documentation=https://www.varnish-cache.org/docs/ man:varnishd
[Service]
Type=simple
# Maximum number of open files (for ulimit -n)
LimitNOFILE=131072
# Locked shared memory - should suffice to lock the shared memory log
# (varnishd -l argument)
# Default log size is 80MB vsl + 1M vsm + header -> 82MB
# unit is bytes
LimitMEMLOCK=85983232
ExecStart=/usr/sbin/varnishd \
-j unix,user=vcache \
-F \
-a {{ varnish_listen_host }}:{{ varnish_listen_port }} \
-T localhost:6082 \
-f /etc/varnish/default.vcl \
-S /etc/varnish/secret \
-s malloc,{{ varnish_maxmemory }} \
-p http_resp_hdr_len=16384
ExecReload=/usr/share/varnish/varnishreload
ProtectSystem=full
ProtectHome=true
PrivateTmp=true
PrivateDevices=true
[Install]
WantedBy=multi-user.target