* ADD: initial commit

2024-01-20 11:52:52 +01:00
parent 95fc557025
commit 31054ebf7d
41 changed files with 1466 additions and 0 deletions

LICENSE Normal file

@@ -0,0 +1,14 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

build-all.sh Executable file

@@ -0,0 +1,97 @@
#!/bin/bash
set -e
set -o pipefail
SCRIPT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
REPO_URL="${REPO_URL:-uleenucks}"
JOBS=${JOBS:-2}
DOCKER="$(which docker)"
DOCKERFILESPATH="${HOME}/closed/dockerfiles"
ERRORS="$(pwd)/errors"
dcleanup(){
${DOCKER} rm $(${DOCKER} ps -aq 2>/dev/null) 2>/dev/null
${DOCKER} rm -v $(${DOCKER} ps --filter status=exited -q 2>/dev/null) 2>/dev/null
${DOCKER} rmi $(${DOCKER} images --filter dangling=true -q 2>/dev/null) 2>/dev/null
}
build_and_push_kaniko(){
base=$1
suite=$2
build_dir=$3
echo "Building ${REPO_URL}/${base}:${suite} for context ${build_dir}"
docker run \
-v "${HOME}/config.json:/kaniko/.docker/config.json:ro" \
-v "$(pwd)/${build_dir}:/workspace" \
gcr.io/kaniko-project/executor:debug \
--destination "${REPO_URL}/${base}:${suite}" --force \
|| return 1
# on successful build, push the image
echo " --- "
echo "Successfully built and pushed ${base}:${suite} with context ${build_dir}"
echo " --- "
}
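# dofile: derive the image name and tag from a Dockerfile path (top-level
# directory = image name, last directory = tag, falling back to "latest")
# and build it; failed builds are recorded in the errors file.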
dofile() {
f=$1
image=${f%Dockerfile}
base=${image%%\/*}
build_dir=$(dirname "$f")
suite=${build_dir##*\/}
if [[ -z "$suite" ]] || [[ "$suite" == "$base" ]]; then
suite=latest
fi
{
$SCRIPT build_and_push_kaniko "${base}" "${suite}" "${build_dir}"
} || {
# add to errors
echo "${base}:${suite}" >> "$ERRORS"
}
echo
echo
}
prescript(){
cd "${DOCKERFILESPATH}"
rm -f errors
git pull
}
main(){
# get the dockerfiles
IFS=$'\n'
files=( $(find . -iname '*Dockerfile' | sed 's|./||' | sort) )
unset IFS
# build all dockerfiles
echo "Running in parallel with ${JOBS} jobs."
parallel --tag --verbose --ungroup -j"${JOBS}" "$SCRIPT" dofile "{1}" ::: "${files[@]}"
if [[ ! -f $ERRORS ]]; then
echo "No errors, hooray!"
else
echo "[ERROR] Some images did not build correctly, see below." >&2
echo "These images failed: $(cat "$ERRORS")" >&2
exit 1
fi
}
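# run: with no arguments, build everything via main; otherwise treat the
# arguments as a function call. This is how the script re-invokes itself
# through GNU parallel ("$SCRIPT dofile <path>").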
run(){
args=$@
f=$1
if [[ "$f" == "" ]]; then
main "$args"
else
$args
fi
}
prescript
run $@
dcleanup

gitserver/README.md Normal file

@@ -0,0 +1,51 @@
# Docker-Gitserver
[Docker-Gitserver](https://hub.docker.com/r/uleenucks/gitserver) is a containerized [Git](https://git-scm.com/) server.
## Requirements
- [Docker](https://www.docker.com/)
- [docker-compose](https://www.docker.com/products/docker-compose)
## Usage
### Starting server
For the initial start, just run the following:
`docker-compose up -d`
Remember the container name (referred to as `$container` below);
it is printed as `Creating $container`.
### Starting without docker-compose
If you can't or don't want to use docker-compose, run
```
docker run -d --name git \
-p 22124:22 \
-v "${GIT_DATA}:/home/git/repositories" \
-e "PUBKEY=$(cat ~/.ssh/id_ed25519.pub)" \
uleenucks/gitserver:latest
```
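`GIT_DATA` is a host directory of your choice for the bare repositories; the value below is only an example. `start.sh` also reads `PUBKEY2` … `PUBKEY7`, so additional keys can be passed with further `-e` options.
```
# example host path for the bare repositories referenced above
export GIT_DATA=/srv/git/repositories
# an additional key could be passed with: -e "PUBKEY2=$(cat ~/.ssh/other_key.pub)"
```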
### Stopping server
`docker-compose stop`
### Creating a repo on server
To create a new repo named `open`, use the following:
#### Open a shell on the server
`docker exec -it $container sh`
The container name `$container` is printed on startup.
#### Create new repository on server
```
create_repo /home/git/repositories/open.git
```
This will create `open.git` as a new repository on your server.
#### Connecting to your git repository
From any client do the following:
```
git remote add origin ssh://git@<fqdn>:22124/home/git/repositories/open.git
```
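After adding the remote, a first push could look like this (assuming a local repository whose default branch is `main`, matching the server's `init.defaultBranch`):
```
git push -u origin main
```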


@@ -0,0 +1,17 @@
# docker-compose.yml
version: '2'
services:
  git:
    container_name: git
    restart: on-failure:5
    image: uleenucks/gitserver
    volumes:
      - "gitdata:/home/git/repositories"
    ports:
      - "22124:22"
    environment:
      - "PUBKEY=$$(cat ~/.ssh/id_ed25519.pub)"
volumes:
  gitdata:
    driver: local


@@ -0,0 +1,40 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
ENV HOME /root
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U upgrade --no-cache --no-progress \
&& apk --no-cache --no-progress add \
bash \
git \
openssh \
&& sed -i "s/#PasswordAuthentication yes/PasswordAuthentication no/" /etc/ssh/sshd_config \
&& sed -i "s/#PubkeyAuthentication yes/PubkeyAuthentication yes/" /etc/ssh/sshd_config \
&& echo -e "AllowUsers git\n" >> /etc/ssh/sshd_config \
&& echo -e "Port 22\n" >> /etc/ssh/sshd_config \
&& addgroup git \
&& adduser -D -S -s /usr/bin/git-shell -h /home/git -g git git \
&& mkdir -p /home/git/.ssh \
&& chown -R git:git /home/git \
&& passwd -u git \
&& git config --global init.defaultBranch main \
&& mkdir /home/git/repositories \
&& rm -rf /tmp/* /var/cache/apk/*
VOLUME /home/git/repositories
ENV HOME /home/git
EXPOSE 22
WORKDIR $HOME
ADD app /app
COPY ./start.sh /
COPY create_repo /usr/bin/create_repo
HEALTHCHECK CMD [ "/app/healthcheck.sh" ]
ENTRYPOINT [ "/start.sh" ]
CMD [ "/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config" ]


@@ -0,0 +1,7 @@
#!/bin/sh
if [ ! -f "/app/health" ]; then
printf 0 > "/app/health"
fi
exit "$(cat /app/health)"

gitserver/gitserver/create_repo Executable file

@@ -0,0 +1,14 @@
#!/bin/bash
set -e
set -o pipefail
repo=$1
if [[ "$repo" != *.git ]]; then
repo="${repo}.git"
fi
echo "Creating $repo"
(
cd "$HOME"
git init --bare "$repo"
chown -R git:git "$repo"
)

gitserver/gitserver/start.sh Executable file

@@ -0,0 +1,73 @@
#!/bin/sh
set -e
set -o pipefail
[ "$DEBUG" == 'true' ] && set -x
DAEMON=sshd
HOSTKEY=/etc/ssh/ssh_host_ed25519_key
# create the host key if not already created
if [ ! -f "${HOSTKEY}" ]; then
ssh-keygen -A
fi
mkdir -p ${HOME}/.ssh
source /etc/profile
[ "$PUBKEY" ] && echo "$PUBKEY" > ${HOME}/.ssh/authorized_keys
[ "$PUBKEY2" ] && echo "$PUBKEY2" >> ${HOME}/.ssh/authorized_keys
[ "$PUBKEY3" ] && echo "$PUBKEY3" >> ${HOME}/.ssh/authorized_keys
[ "$PUBKEY4" ] && echo "$PUBKEY4" >> ${HOME}/.ssh/authorized_keys
[ "$PUBKEY5" ] && echo "$PUBKEY5" >> ${HOME}/.ssh/authorized_keys
[ "$PUBKEY6" ] && echo "$PUBKEY6" >> ${HOME}/.ssh/authorized_keys
[ "$PUBKEY7" ] && echo "$PUBKEY7" >> ${HOME}/.ssh/authorized_keys
chown -R git:git ${HOME}
chmod -R 755 ${HOME}
# Fix permissions, if writable
if [ -w ${HOME}/.ssh ]; then
chown git:git ${HOME}/.ssh && chmod 700 ${HOME}/.ssh/
fi
if [ -w ${HOME}/.ssh/authorized_keys ]; then
chown git:git ${HOME}/.ssh/authorized_keys
chmod 600 ${HOME}/.ssh/authorized_keys
fi
# Warn if no config
if [ ! -e ${HOME}/.ssh/authorized_keys ]; then
echo "WARNING: No SSH authorized_keys found for git"
fi
# set the default shell
mkdir -p $HOME/git-shell-commands
cat >$HOME/git-shell-commands/no-interactive-login <<\EOF
#!/bin/sh
printf '%s\n' "Hi $USER! You've successfully authenticated, but I do not"
printf '%s\n' "provide interactive shell access."
exit 128
EOF
chmod +x $HOME/git-shell-commands/no-interactive-login
stop() {
echo "Received SIGINT or SIGTERM. Shutting down $DAEMON"
# Get PID
pid=$(cat /var/run/$DAEMON/$DAEMON.pid)
# Set TERM
kill -SIGTERM "${pid}"
# Wait for exit
wait "${pid}"
# All done.
echo "Done."
}
echo "Running $@"
if [ "$(basename $1)" == "$DAEMON" ]; then
trap stop SIGINT SIGTERM
$@ &
pid="$!"
mkdir -p /var/run/$DAEMON && echo "${pid}" > /var/run/$DAEMON/$DAEMON.pid
wait "${pid}" && exit $?
else
exec "$@"
fi

htop/Dockerfile Normal file

@@ -0,0 +1,11 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U upgrade --no-cache --no-progress \
&& apk --no-cache --no-progress add htop \
&& rm -rf /tmp/* /var/cache/apk/*
CMD [ "htop" ]

libre-nginx/Dockerfile Normal file

@@ -0,0 +1,90 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
#COPY rootfs /
ENV NGINX_VERSION=1.21.4
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& set -ex \
&& apk -U upgrade --no-cache --no-progress \
&& apk add --no-cache --no-progress \
ca-certificates \
libressl \
pcre \
zlib \
su-exec \
&& apk add --no-progress --no-cache --virtual .build-deps \
build-base \
linux-headers \
libressl-dev \
pcre-dev \
wget \
zlib-dev \
&& cd /tmp \
&& wget http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz \
&& tar xzf nginx-${NGINX_VERSION}.tar.gz \
&& cd /tmp/nginx-${NGINX_VERSION} \
&& wget -q https://github.com/nginx-modules/ngx_http_tls_dyn_size/raw/master/nginx__dynamic_tls_records_1.15.5%2B.patch -O dynamic_records.patch \
&& patch -p1 < dynamic_records.patch \
&& ./configure \
--prefix=/etc/nginx \
--sbin-path=/usr/sbin/nginx \
--conf-path=/etc/nginx/nginx.conf \
--error-log-path=/var/log/nginx/error.log \
--http-log-path=/var/log/nginx/access.log \
--pid-path=/tmp/nginx.pid \
--lock-path=/tmp/nginx.lock \
--http-client-body-temp-path=/var/cache/nginx/client_temp \
--http-proxy-temp-path=/var/cache/nginx/proxy_temp \
--http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_gzip_static_module \
--with-http_stub_status_module \
--with-file-aio \
--with-threads \
--with-stream \
--with-stream_ssl_module \
--with-pcre-jit \
--with-http_realip_module \
--with-http_addition_module \
--with-http_sub_module \
--with-http_dav_module \
--with-http_flv_module \
--with-http_mp4_module \
--with-http_gunzip_module \
--with-http_random_index_module \
--with-http_secure_link_module \
--with-http_auth_request_module \
--with-http_slice_module \
--with-mail \
--with-mail_ssl_module \
--with-stream_realip_module \
--without-http_ssi_module \
--without-http_scgi_module \
--without-http_uwsgi_module \
--without-http_geo_module \
--without-http_autoindex_module \
--without-http_split_clients_module \
--without-http_memcached_module \
--without-http_empty_gif_module \
--without-http_browser_module \
&& make -j$(getconf _NPROCESSORS_ONLN) \
&& make install \
&& mkdir -p /var/cache/nginx \
&& strip -s /usr/sbin/nginx \
&& apk del .build-deps \
&& rm -rf /tmp/* /var/cache/apk/*
EXPOSE 8000 4430
COPY rootfs /
RUN chmod +x /usr/local/bin/run.sh
VOLUME /sites-enabled /conf.d /www /passwds /certs /var/log/nginx
CMD [ "run.sh" ]

libre-nginx/README.md Normal file

@@ -0,0 +1,46 @@
## uleenucks/libre-nginx
![](https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/115px-Nginx_logo.svg.png)
#### What is this?
This is nginx statically linked against LibreSSL.
#### Features
- Based on Alpine Linux.
- nginx built against **LibreSSL**.
- **TLS 1.3** patch: use of the TLS 1.3 draft is enforced (no other way found yet).
- Built using hardening gcc flags.
- Dynamic TLS records patch (Cloudflare).
- HTTP/2 (+NPN) support.
- Brotli compression support (and configured).
- AIO threads support.
- No unnecessary modules (except fastcgi).
- PCRE JIT enabled.
- Strong configurations included.
- Anonymous webserver signature (headers-more).
#### Notes
- It is required to change the `listen` directive to 8000/4430 instead of 80/443 (see the run example after the volume list below).
- Linux 3.17+, and the latest Docker stable are recommended.
#### Volumes
- **/sites-enabled** : vhosts files (*.conf)
- **/conf.d** : additional configuration files
- **/certs** : SSL/TLS certificates
- **/var/log/nginx** : nginx logs
- **/passwds** : authentication files
- **/www** : put your websites there
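A sketch of how the container might be started, publishing the unprivileged ports and mounting a few of the volumes above (host paths and the image tag are placeholders):
```
docker run -d --name libre-nginx \
  -p 80:8000 -p 443:4430 \
  -v /srv/nginx/sites-enabled:/sites-enabled \
  -v /srv/nginx/certs:/certs \
  -v /srv/nginx/www:/www \
  uleenucks/libre-nginx:latest
```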
#### Build-time variables
- **NGINX_VERSION** : version of nginx
- **GPG_NGINX** : fingerprint of signing key package
- **BUILD_CORES** : number of cores used during compilation
#### How to use it?
https://github.com/hardware/mailserver/wiki/Reverse-proxy-configuration
Some configuration files located in `/etc/nginx/conf` are already provided; you can use them with the `include` directive.
- `ssl_params`: provides a nice balance between compatibility and security.
- `headers_params`: HSTS (+ preload), XSS protection, etc.
- `proxy_params`: use with `proxy_pass`.
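As a hedged sketch (hypothetical host name, certificate paths, and document root), a vhost dropped into `/sites-enabled` could pull these snippets in via `include`:
```
cat > /srv/nginx/sites-enabled/example.conf <<'EOF'
server {
    listen 4430 ssl;
    server_name example.org;

    ssl_certificate     /certs/example.org.crt;
    ssl_certificate_key /certs/example.org.key;
    include /etc/nginx/conf/ssl_params;
    include /etc/nginx/conf/headers_params;

    root /www/example.org;
}
EOF
```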


@@ -0,0 +1,6 @@
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Content-Security-Policy "default-src 'self'; script-src 'none'; img-src 'self'; style-src 'self'; font-src 'self'; frame-src 'none'; object-src 'none'; frame-ancestors 'self'; base-uri 'none'; form-action 'none'";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";
add_header 'Referrer-Policy' 'strict-origin';


@@ -0,0 +1,6 @@
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Remote-Port $remote_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_redirect off;


@@ -0,0 +1,7 @@
ssl_protocols TLSv1.3 TLSv1.2;
ssl_ecdh_curve secp521r1:secp384r1:prime256v1;
ssl_ciphers EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH+aRSA+RC4:EECDH:EDH+aRSA:RC4:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:!aNULL:!eNULL:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 15m;
ssl_session_tickets off;


@@ -0,0 +1,67 @@
worker_processes auto;
pid /tmp/nginx.pid;
daemon off;
pcre_jit on;
events {
worker_connections 2048;
use epoll;
}
http {
limit_conn_zone $binary_remote_addr zone=limit_per_ip:10m;
limit_conn limit_per_ip 128;
limit_req_zone $binary_remote_addr zone=allips:10m rate=150r/s;
limit_req zone=allips burst=150 nodelay;
ssl_dyn_rec_enable on;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log combined;
error_log /var/log/nginx/error.log crit;
fastcgi_temp_path /tmp/fastcgi 1 2;
proxy_temp_path /tmp/proxy 1 2;
client_body_temp_path /tmp/client_body 1 2;
client_body_buffer_size 10K;
client_header_buffer_size 1k;
client_max_body_size 8m;
large_client_header_buffers 2 1k;
aio threads;
sendfile on;
keepalive_timeout 15;
keepalive_disable msie6;
keepalive_requests 100;
tcp_nopush on;
tcp_nodelay on;
server_tokens off;
gzip on;
gzip_comp_level 5;
gzip_min_length 512;
gzip_buffers 4 8k;
gzip_proxied any;
gzip_vary on;
gzip_disable "msie6";
gzip_types
text/css
text/javascript
text/xml
text/plain
text/x-component
application/javascript
application/x-javascript
application/json
application/xml
application/rss+xml
application/vnd.ms-fontobject
font/truetype
font/opentype
image/svg+xml;
include /sites-enabled/*.conf;
}


@@ -0,0 +1,3 @@
#!/bin/sh
chmod -R 700 /certs
exec nginx

lynx/Dockerfile Normal file

@@ -0,0 +1,13 @@
# Run Lynx in a container
#
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U upgrade --no-cache --no-progress \
&& apk --no-cache --no-progress add lynx \
&& rm -rf /tmp/* /var/cache/apk/*
ENTRYPOINT [ "lynx" ]

mtr/Dockerfile Normal file

@@ -0,0 +1,11 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U upgrade --no-cache --no-progress \
&& apk --no-cache --no-progress add mtr \
&& rm -rf /tmp/* /var/cache/apk/*
ENTRYPOINT [ "mtr" ]

nmap/Dockerfile Normal file

@@ -0,0 +1,11 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U upgrade --no-cache --no-progress \
&& apk --no-cache --no-progress add nmap \
&& rm -rf /tmp/* /var/cache/apk/*
ENTRYPOINT [ "nmap" ]

postgres-backup/.env Normal file

@@ -0,0 +1,11 @@
# 1. Please put the value in double quotes to avoid problems.
# 2. To use the file, you need to map the file to `/.env` in the container.
CRON="0 1 * * *"
BACKUP_FILE_SUFFIX="%Y%m%d"
BACKUP_KEEP_DAYS="14"
TIMEZONE="UTC"
#POSTGRES_HOST=""
#POSTGRES_PORT=""
#POSTGRES_DB=""
#POSTGRES_USER=""
#POSTGRES_PASSWORD=""
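A standalone `docker run` equivalent of the compose service further down in this commit might look like this (paths follow that compose file and are only an example; database credentials come from a separate env file):
```
docker run -d --name postgres-backup --init \
  -v /data/services/postgresql/backup.env:/.env:ro \
  -v /data/backups/postgresql/backups:/backups \
  --env-file /data/backups/postgresql/postgres.env \
  uleenucks/postgres-backup
```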


@@ -0,0 +1,17 @@
FROM alpine:latest
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
ADD app /app
RUN chmod +x /app/*.sh \
&& apk add --no-cache \
postgresql15-client \
tzdata \
7zip \
bash \
supercronic
HEALTHCHECK CMD [ "/app/healthcheck.sh" ]
ENTRYPOINT ["/app/entrypoint.sh"]


@@ -0,0 +1,59 @@
#!/bin/bash
. /app/includes.sh
function clear_dir() {
rm -rf "${BACKUP_DIR}"
}
function backup_init() {
NOW="$(date +"${BACKUP_FILE_DATE_FORMAT}")"
# backup postgresql database file
BACKUP_FILE_DB_POSTGRESQL="${BACKUP_DIR}/db.${NOW}.dump"
# backup zip file
BACKUP_FILE_ZIP="${BACKUP_DIR}/../backup.${NOW}.7z"
mkdir -p "${BACKUP_DIR}"
}
function backup_db_postgresql() {
echo "backup postgresql database"
pg_dump -Fc -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -d "${POSTGRES_DB}" -U "${POSTGRES_USER}" -f "${BACKUP_FILE_DB_POSTGRESQL}"
if [[ $? != 0 ]]; then
echo "backup postgresql database failed"
exit 1
fi
}
function backup_package() {
echo "package backup file"
7z a -t7z -m0=lzma2 -mx=9 -mfb=64 -md=32m -ms=on -mhe=on "${BACKUP_FILE_ZIP}" "${BACKUP_DIR}"/*
ls -lah "${BACKUP_DIR}"
echo "display backup ${ZIP_TYPE} file list"
7z l "${BACKUP_FILE_ZIP}"
chown 1000:100 "${BACKUP_FILE_ZIP}"
}
function clear_history() {
if [[ "${BACKUP_KEEP_DAYS}" -gt 0 ]]; then
echo find "${BACKUP_DIR}" -mtime +"${BACKUP_KEEP_DAYS}" -exec rm -rf {} \;
fi
}
echo "running the backup program at $(date +"%Y-%m-%d %H:%M:%S %Z")"
init_env
clear_dir
backup_init
backup_db_postgresql
backup_package
clear_dir
clear_history


@@ -0,0 +1,37 @@
#!/bin/bash
. /app/includes.sh
# restore
if [[ "$1" == "restore" ]]; then
#. /app/restore.sh
#shift
#restore $*
echo "not implemented yet"
exit 0
fi
function configure_cron() {
local FIND_CRON_COUNT="$(grep -c 'backup.sh' "${CRON_CONFIG_FILE}" 2> /dev/null)"
if [[ "${FIND_CRON_COUNT}" -eq 0 ]]; then
echo "${CRON} bash /app/backup.sh" >> "${CRON_CONFIG_FILE}"
fi
}
init_env
configure_postgresql
configure_cron
# backup manually
if [[ "$1" == "backup" ]]; then
echo "Manually triggering a backup will only execute the backup script once, and the container will exit upon completion."
bash "/app/backup.sh"
exit 0
fi
# foreground run crond
exec supercronic -passthrough-logs -quiet "${CRON_CONFIG_FILE}"
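Because of the `backup` branch above, a one-off backup can be triggered without waiting for cron; with the compose service defined later in this commit that could be, for example:
```
docker-compose run --rm postgres_backup backup
```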


@@ -0,0 +1,10 @@
#!/bin/sh
# shellcheck disable=SC1091
. /app/includes.sh
if [ ! -f "${HEALTHCHECK_FILE}" ]; then
printf 0 > "${HEALTHCHECK_FILE}"
fi
exit "$(cat "${HEALTHCHECK_FILE}")"


@@ -0,0 +1,166 @@
#!/bin/bash
ENV_FILE="/.env"
CRON_CONFIG_FILE="${HOME}/crontabs"
BACKUP_DIR="/backups/tmp"
RESTORE_DIR="/restore"
RESTORE_EXTRACT_DIR="/extract"
########################################
# Check file is exist.
# Arguments:
# file
########################################
function check_file_exist() {
if [[ ! -f "$1" ]]; then
echo "cannot access $1: No such file"
exit 1
fi
}
########################################
# Check directory is exist.
# Arguments:
# directory
########################################
function check_dir_exist() {
if [[ ! -d "$1" ]]; then
echo "cannot access $1: No such directory"
exit 1
fi
}
########################################
# Configure PostgreSQL password file.
# Arguments:
# None
########################################
function configure_postgresql() {
echo "${POSTGRES_HOST}:${POSTGRES_PORT}:${POSTGRES_DB}:${POSTGRES_USER}:${POSTGRES_PASSWORD}" > ~/.pgpass
chmod 0600 ~/.pgpass
}
########################################
# Export variables from .env file.
# Arguments:
# None
# Outputs:
# variables with prefix 'DOTENV_'
# Reference:
# https://gist.github.com/judy2k/7656bfe3b322d669ef75364a46327836#gistcomment-3632918
########################################
function export_env_file() {
if [[ -f "${ENV_FILE}" ]]; then
echo "find \"${ENV_FILE}\" file and export variables"
set -a
source <(cat "${ENV_FILE}" | sed -e '/^#/d;/^\s*$/d' -e 's/\(\w*\)[ \t]*=[ \t]*\(.*\)/DOTENV_\1=\2/')
set +a
fi
}
########################################
# Get variables from
# environment variables,
# secret file in environment variables,
# secret file in .env file,
# environment variables in .env file.
# Arguments:
# variable name
# Outputs:
# variable value
########################################
function get_env() {
local VAR="$1"
local VAR_FILE="${VAR}_FILE"
local VAR_DOTENV="DOTENV_${VAR}"
local VAR_DOTENV_FILE="DOTENV_${VAR_FILE}"
local VALUE=""
if [[ -n "${!VAR:-}" ]]; then
VALUE="${!VAR}"
elif [[ -n "${!VAR_FILE:-}" ]]; then
VALUE="$(cat "${!VAR_FILE}")"
elif [[ -n "${!VAR_DOTENV_FILE:-}" ]]; then
VALUE="$(cat "${!VAR_DOTENV_FILE}")"
elif [[ -n "${!VAR_DOTENV:-}" ]]; then
VALUE="${!VAR_DOTENV}"
fi
export "${VAR}=${VALUE}"
}
########################################
# Initialization environment variables.
# Arguments:
# None
# Outputs:
# environment variables
########################################
function init_env() {
# export
export_env_file
init_env_db
# CRON
get_env CRON
CRON="${CRON:-"5 * * * *"}"
# BACKUP_KEEP_DAYS
get_env BACKUP_KEEP_DAYS
BACKUP_KEEP_DAYS="${BACKUP_KEEP_DAYS:-"0"}"
# BACKUP_FILE_DATE_FORMAT
get_env BACKUP_FILE_SUFFIX
get_env BACKUP_FILE_DATE
get_env BACKUP_FILE_DATE_SUFFIX
BACKUP_FILE_DATE="$(echo "${BACKUP_FILE_DATE:-"%Y%m%d"}${BACKUP_FILE_DATE_SUFFIX}" | sed 's/[^0-9a-zA-Z%_-]//g')"
BACKUP_FILE_DATE_FORMAT="$(echo "${BACKUP_FILE_SUFFIX:-"${BACKUP_FILE_DATE}"}" | sed 's/\///g')"
# HEALTHCHECK_FILE
get_env HEALTHCHECK_FILE
HEALTHCHECK_FILE="${HEALTHCHECK_FILE:-/app/health}"
# TIMEZONE
get_env TIMEZONE
local TIMEZONE_MATCHED_COUNT=$(ls "/usr/share/zoneinfo/${TIMEZONE}" 2> /dev/null | wc -l)
if [[ "${TIMEZONE_MATCHED_COUNT}" -ne 1 ]]; then
TIMEZONE="UTC"
fi
echo "========================================"
echo "DB_URL: postgresql://${POSTGRES_USER}:***(${#POSTGRES_PASSWORD} Chars)@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}"
echo "========================================"
echo "CRON: ${CRON}"
echo "BACKUP_FILE_DATE_FORMAT: ${BACKUP_FILE_DATE_FORMAT} (example \"[filename].$(date +"${BACKUP_FILE_DATE_FORMAT}").[ext]\")"
echo "BACKUP_KEEP_DAYS: ${BACKUP_KEEP_DAYS}"
echo "TIMEZONE: ${TIMEZONE}"
echo "HEALTHCHECK_FILE: ${HEALTHCHECK_FILE}"
echo "========================================"
}
function init_env_db() {
DB_TYPE="POSTGRESQL"
# POSTGRES_HOST
get_env POSTGRES_HOST
# PG_PORT
get_env POSTGRES_PORT
POSTGRES_PORT="${POSTGRES_PORT:-"5432"}"
# POSTGRES_DB
get_env POSTGRES_DB
POSTGRES_DB="${POSTGRES_DB:-"postgres"}"
# POSTGRES_USER
get_env POSTGRES_USER
POSTGRES_USER="${POSTGRES_USER:-"postgres"}"
# POSTGRES_PASSWORD
get_env POSTGRES_PASSWORD
}


@@ -0,0 +1,13 @@
---
version: '3.7'
services:
  postgres_backup:
    restart: on-failure
    image: uleenucks/postgres-backup
    init: true
    volumes:
      - "/data/services/postgresql/data:/data"
      - "/data/services/postgresql/backup.env:/.env"
      - "/data/backups/postgresql/backups:/backups"
    env_file:
      - "/data/backups/postgresql/postgres.env"


@@ -0,0 +1,5 @@
FROM uleenucks/powershell
RUN pwsh -c "Install-Module -Name AzureRM.NetCore"
# Import-Module -Name AzureRM
ENTRYPOINT [ "pwsh" ]

powershell/Dockerfile Normal file

@@ -0,0 +1,24 @@
FROM debian:bullseye-slim
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN apt-get update && apt-get install -y -q --no-install-recommends \
curl \
ca-certificates \
apt-transport-https \
gnupg2
RUN curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add -
RUN echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-debian-bullseye-prod bullseye main" > /etc/apt/sources.list.d/microsoft.list
RUN apt-get update && apt-get install -y -q --no-install-recommends \
powershell \
&& apt-get purge -y -q \
ca-certificates \
curl \
apt-transport-https \
gnupg2 \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
ENTRYPOINT [ "/usr/bin/pwsh" ]

pyweb/Dockerfile Normal file

@@ -0,0 +1,19 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk -U --no-cache --no-progress upgrade \
&& apk add --no-cache --no-progress python3 curl \
&& mkdir /output \
&& rm -rf /tmp/* /var/cache/apk/*
WORKDIR /output
EXPOSE 1313
ADD app /app
HEALTHCHECK CMD [ "/app/healthcheck.sh" ]
ENTRYPOINT [ "/app/entrypoint.sh" ]

pyweb/app/entrypoint.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/sh
# serve the contents of /output (the image's WORKDIR) on port 1313
exec /usr/bin/python3 -m http.server 1313

pyweb/app/healthcheck.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/sh
if [ ! -f "/app/health" ]; then
printf 0 > "/app/health"
fi
exit "$(cat /app/health)"

shellcheck/Dockerfile Normal file

@@ -0,0 +1,12 @@
FROM debian:bookworm-slim
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN apt-get update && apt-get install -y \
file \
--no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y shellcheck
CMD [ "shellcheck" ]

test.sh Executable file

@@ -0,0 +1,54 @@
#!/bin/bash
# shamelessly stolen from Jessie (https://github.com/jessfraz/dockerfiles)
set -e
set -o pipefail
# this is kind of an expensive check, so let's not do this twice if we
# are running more than one validate bundlescript
VALIDATE_REPO='https://github.com/uleenucks/dockerfiles.git'
VALIDATE_BRANCH='master'
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
validate_diff() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git diff "$VALIDATE_COMMIT_DIFF" "$@"
else
git diff HEAD~ "$@"
fi
}
# get the dockerfiles changed
IFS=$'\n'
files=( $(validate_diff --name-only -- '*Dockerfile') )
unset IFS
# build the changed dockerfiles
for f in "${files[@]}"; do
if ! [[ -e "$f" ]]; then
continue
fi
image=${f%Dockerfile}
base=${image%%\/*}
suite=${image##*\/}
build_dir=$(dirname "$f")
if [[ -z "$suite" ]]; then
suite=latest
fi
(
set -x
docker build -t "${base}":"${suite}" "${build_dir}"
)
echo " --- "
echo "Successfully built ${base}:${suite} with context ${build_dir}"
echo " --- "
done

traceroute/Dockerfile Normal file

@@ -0,0 +1,9 @@
FROM alpine:3.18
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community/" >> /etc/apk/repositories \
&& echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories \
&& apk --no-cache --no-progress add tcptraceroute
ENTRYPOINT [ "tcptraceroute" ]

vaultwarden-backup/.env Normal file

@@ -0,0 +1,6 @@
# 1. Please put the value in double quotes to avoid problems.
# 2. To use the file, you need to map the file to `/.env` in the container.
CRON="0 1 * * *"
BACKUP_FILE_SUFFIX="%Y%m%d"
BACKUP_KEEP_DAYS="14"
TIMEZONE="UTC"


@@ -0,0 +1,17 @@
FROM alpine:latest
LABEL maintainer.name="Uwe Hermann"\
maintainer.email="uh@uleenucks.de"
ADD app /app
RUN chmod +x /app/*.sh \
&& apk add --no-cache \
postgresql15-client \
tzdata \
7zip \
bash \
supercronic
HEALTHCHECK CMD [ "/app/healthcheck.sh" ]
ENTRYPOINT ["/app/entrypoint.sh"]


@@ -0,0 +1,132 @@
#!/bin/bash
. /app/includes.sh
function clear_dir() {
rm -rf "${BACKUP_DIR}"
}
function backup_init() {
NOW="$(date +"${BACKUP_FILE_DATE_FORMAT}")"
# backup vaultwarden database file (postgresql)
BACKUP_FILE_DB_POSTGRESQL="${BACKUP_DIR}/db.${NOW}.dump"
# backup vaultwarden config file
BACKUP_FILE_CONFIG="${BACKUP_DIR}/config.${NOW}.json"
# backup vaultwarden rsakey files
BACKUP_FILE_RSAKEY="${BACKUP_DIR}/rsakey.${NOW}.tar"
# backup vaultwarden attachments directory
BACKUP_FILE_ATTACHMENTS="${BACKUP_DIR}/attachments.${NOW}.tar"
# backup vaultwarden sends directory
BACKUP_FILE_SENDS="${BACKUP_DIR}/sends.${NOW}.tar"
# backup zip file
BACKUP_FILE_ZIP="${BACKUP_DIR}/../backup.${NOW}.7z"
}
function backup_db_postgresql() {
echo "backup vaultwarden postgresql database"
pg_dump -Fc -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -d "${POSTGRES_DB}" -U "${POSTGRES_USER}" -f "${BACKUP_FILE_DB_POSTGRESQL}"
if [[ $? != 0 ]]; then
echo "backup vaultwarden postgresql database failed"
exit 1
fi
}
function backup_config() {
echo "backup vaultwarden config"
if [[ -f "${DATA_CONFIG}" ]]; then
cp -f "${DATA_CONFIG}" "${BACKUP_FILE_CONFIG}"
else
echo "vaultwarden config not found, skipping"
fi
}
function backup_rsakey() {
echo "backup vaultwarden rsakey"
local FIND_RSAKEY=$(find "${DATA_RSAKEY_DIRNAME}" -name "${DATA_RSAKEY_BASENAME}*" | xargs -I {} basename {})
local FIND_RSAKEY_COUNT=$(echo "${FIND_RSAKEY}" | wc -l)
if [[ "${FIND_RSAKEY_COUNT}" -gt 0 ]]; then
echo "${FIND_RSAKEY}" | tar -c -C "${DATA_RSAKEY_DIRNAME}" -f "${BACKUP_FILE_RSAKEY}" -T -
echo "display rsakey tar file list"
tar -tf "${BACKUP_FILE_RSAKEY}"
else
echo "vaultwarden rsakey not found, skipping"
fi
}
function backup_attachments() {
echo "backup vaultwarden attachments"
if [[ -d "${DATA_ATTACHMENTS}" ]]; then
tar -c -C "${DATA_ATTACHMENTS_DIRNAME}" -f "${BACKUP_FILE_ATTACHMENTS}" "${DATA_ATTACHMENTS_BASENAME}"
echo "display attachments tar file list"
tar -tf "${BACKUP_FILE_ATTACHMENTS}"
else
echo "vaultwarden attachments directory not found, skipping"
fi
}
function backup_sends() {
echo "backup vaultwarden sends"
if [[ -d "${DATA_SENDS}" ]]; then
tar -c -C "${DATA_SENDS_DIRNAME}" -f "${BACKUP_FILE_SENDS}" "${DATA_SENDS_BASENAME}"
echo "display sends tar file list"
tar -tf "${BACKUP_FILE_SENDS}"
else
echo "vaultwarden sends directory not found, skipping"
fi
}
function backup() {
mkdir -p "${BACKUP_DIR}"
backup_db_postgresql
backup_config
backup_rsakey
backup_attachments
backup_sends
ls -lah "${BACKUP_DIR}"
}
function backup_package() {
echo "package backup file"
7z a -t7z -m0=lzma2 -mx=9 -mfb=64 -md=32m -ms=on -mhe=on "${BACKUP_FILE_ZIP}" "${BACKUP_DIR}"/*
ls -lah "${BACKUP_DIR}"
echo "display backup ${ZIP_TYPE} file list"
7z l "${BACKUP_FILE_ZIP}"
chown 1000:100 "${BACKUP_FILE_ZIP}"
}
function clear_history() {
if [[ "${BACKUP_KEEP_DAYS}" -gt 0 ]]; then
echo find "${BACKUP_DIR}" -mtime +"${BACKUP_KEEP_DAYS}" -exec rm -rf {} \;
fi
}
echo "running the backup program at $(date +"%Y-%m-%d %H:%M:%S %Z")"
init_env
clear_dir
backup_init
backup
backup_package
clear_dir
clear_history


@@ -0,0 +1,37 @@
#!/bin/bash
. /app/includes.sh
# restore
if [[ "$1" == "restore" ]]; then
#. /app/restore.sh
#shift
#restore $*
echo "not implemented yet"
exit 0
fi
function configure_cron() {
local FIND_CRON_COUNT="$(grep -c 'backup.sh' "${CRON_CONFIG_FILE}" 2> /dev/null)"
if [[ "${FIND_CRON_COUNT}" -eq 0 ]]; then
echo "${CRON} bash /app/backup.sh" >> "${CRON_CONFIG_FILE}"
fi
}
init_env
configure_postgresql
configure_cron
# backup manually
if [[ "$1" == "backup" ]]; then
echo "Manually triggering a backup will only execute the backup script once, and the container will exit upon completion."
bash "/app/backup.sh"
exit 0
fi
# foreground run crond
exec supercronic -passthrough-logs -quiet "${CRON_CONFIG_FILE}"
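If the backup container is already running under compose, the backup script can also be invoked directly inside it for a one-off run, for example:
```
docker-compose exec vaultwarden_backup bash /app/backup.sh
```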


@@ -0,0 +1,10 @@
#!/bin/sh
# shellcheck disable=SC1091
. /app/includes.sh
if [ ! -f "${HEALTHCHECK_FILE}" ]; then
printf 0 > "${HEALTHCHECK_FILE}"
fi
exit "$(cat "${HEALTHCHECK_FILE}")"


@@ -0,0 +1,196 @@
#!/bin/bash
ENV_FILE="/.env"
CRON_CONFIG_FILE="${HOME}/crontabs"
BACKUP_DIR="/backups/tmp"
RESTORE_DIR="/restore"
RESTORE_EXTRACT_DIR="/extract"
########################################
# Check file is exist.
# Arguments:
# file
########################################
function check_file_exist() {
if [[ ! -f "$1" ]]; then
echo "cannot access $1: No such file"
exit 1
fi
}
########################################
# Check directory is exist.
# Arguments:
# directory
########################################
function check_dir_exist() {
if [[ ! -d "$1" ]]; then
echo "cannot access $1: No such directory"
exit 1
fi
}
########################################
# Configure PostgreSQL password file.
# Arguments:
# None
########################################
function configure_postgresql() {
echo "${POSTGRES_HOST}:${POSTGRES_PORT}:${POSTGRES_DB}:${POSTGRES_USER}:${POSTGRES_PASSWORD}" > ~/.pgpass
chmod 0600 ~/.pgpass
}
########################################
# Export variables from .env file.
# Arguments:
# None
# Outputs:
# variables with prefix 'DOTENV_'
# Reference:
# https://gist.github.com/judy2k/7656bfe3b322d669ef75364a46327836#gistcomment-3632918
########################################
function export_env_file() {
if [[ -f "${ENV_FILE}" ]]; then
echo "find \"${ENV_FILE}\" file and export variables"
set -a
source <(cat "${ENV_FILE}" | sed -e '/^#/d;/^\s*$/d' -e 's/\(\w*\)[ \t]*=[ \t]*\(.*\)/DOTENV_\1=\2/')
set +a
fi
}
########################################
# Get variables from
# environment variables,
# secret file in environment variables,
# secret file in .env file,
# environment variables in .env file.
# Arguments:
# variable name
# Outputs:
# variable value
########################################
function get_env() {
local VAR="$1"
local VAR_FILE="${VAR}_FILE"
local VAR_DOTENV="DOTENV_${VAR}"
local VAR_DOTENV_FILE="DOTENV_${VAR_FILE}"
local VALUE=""
if [[ -n "${!VAR:-}" ]]; then
VALUE="${!VAR}"
elif [[ -n "${!VAR_FILE:-}" ]]; then
VALUE="$(cat "${!VAR_FILE}")"
elif [[ -n "${!VAR_DOTENV_FILE:-}" ]]; then
VALUE="$(cat "${!VAR_DOTENV_FILE}")"
elif [[ -n "${!VAR_DOTENV:-}" ]]; then
VALUE="${!VAR_DOTENV}"
fi
export "${VAR}=${VALUE}"
}
########################################
# Initialization environment variables.
# Arguments:
# None
# Outputs:
# environment variables
########################################
function init_env() {
# export
export_env_file
init_env_dir
init_env_db
# CRON
get_env CRON
CRON="${CRON:-"5 * * * *"}"
# BACKUP_KEEP_DAYS
get_env BACKUP_KEEP_DAYS
BACKUP_KEEP_DAYS="${BACKUP_KEEP_DAYS:-"0"}"
# BACKUP_FILE_DATE_FORMAT
get_env BACKUP_FILE_SUFFIX
get_env BACKUP_FILE_DATE
get_env BACKUP_FILE_DATE_SUFFIX
BACKUP_FILE_DATE="$(echo "${BACKUP_FILE_DATE:-"%Y%m%d"}${BACKUP_FILE_DATE_SUFFIX}" | sed 's/[^0-9a-zA-Z%_-]//g')"
BACKUP_FILE_DATE_FORMAT="$(echo "${BACKUP_FILE_SUFFIX:-"${BACKUP_FILE_DATE}"}" | sed 's/\///g')"
# HEALTHCHECK_FILE
get_env HEALTHCHECK_FILE
HEALTHCHECK_FILE="${HEALTHCHECK_FILE:-/app/health}"
# TIMEZONE
get_env TIMEZONE
local TIMEZONE_MATCHED_COUNT=$(ls "/usr/share/zoneinfo/${TIMEZONE}" 2> /dev/null | wc -l)
if [[ "${TIMEZONE_MATCHED_COUNT}" -ne 1 ]]; then
TIMEZONE="UTC"
fi
echo "========================================"
echo "DATA_DIR: ${DATA_DIR}"
echo "DATA_CONFIG: ${DATA_CONFIG}"
echo "DATA_RSAKEY: ${DATA_RSAKEY}"
echo "DATA_ATTACHMENTS: ${DATA_ATTACHMENTS}"
echo "DATA_SENDS: ${DATA_SENDS}"
echo "========================================"
echo "DB_URL: postgresql://${POSTGRES_USER}:***(${#POSTGRES_PASSWORD} Chars)@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}"
echo "========================================"
echo "CRON: ${CRON}"
echo "BACKUP_FILE_DATE_FORMAT: ${BACKUP_FILE_DATE_FORMAT} (example \"[filename].$(date +"${BACKUP_FILE_DATE_FORMAT}").[ext]\")"
echo "BACKUP_KEEP_DAYS: ${BACKUP_KEEP_DAYS}"
echo "TIMEZONE: ${TIMEZONE}"
echo "HEALTHCHECK_FILE: ${HEALTHCHECK_FILE}"
echo "========================================"
}
function init_env_dir() {
# DATA_DIR
get_env DATA_DIR
DATA_DIR="${DATA_DIR:-"/data"}"
check_dir_exist "${DATA_DIR}"
# DATA_CONFIG
DATA_CONFIG="${DATA_DIR}/config.json"
# DATA_ATTACHMENTS
get_env DATA_ATTACHMENTS
DATA_ATTACHMENTS="$(dirname "${DATA_ATTACHMENTS:-"${DATA_DIR}/attachments"}/useless")"
DATA_ATTACHMENTS_DIRNAME="$(dirname "${DATA_ATTACHMENTS}")"
DATA_ATTACHMENTS_BASENAME="$(basename "${DATA_ATTACHMENTS}")"
# DATA_SEND
get_env DATA_SENDS
DATA_SENDS="$(dirname "${DATA_SENDS:-"${DATA_DIR}/sends"}/useless")"
DATA_SENDS_DIRNAME="$(dirname "${DATA_SENDS}")"
DATA_SENDS_BASENAME="$(basename "${DATA_SENDS}")"
}
function init_env_db() {
DB_TYPE="POSTGRESQL"
# POSTGRES_HOST
get_env POSTGRES_HOST
# PG_PORT
get_env POSTGRES_PORT
POSTGRES_PORT="${POSTGRES_PORT:-"5432"}"
# POSTGRES_DB
get_env POSTGRES_DB
POSTGRES_DB="${POSTGRES_DB:-"vaultwarden"}"
# POSTGRES_USER
get_env POSTGRES_USER
POSTGRES_USER="${POSTGRES_USER:-"vaultwarden"}"
# POSTGRES_PASSWORD
get_env POSTGRES_PASSWORD
}


@@ -0,0 +1,38 @@
---
version: '3.7'
services:
  vaultwarden:
    image: vaultwarden/server:latest
    container_name: vaultwarden
    restart: always
    environment:
      WEBSOCKET_ENABLED: "true" # Enable WebSocket notifications.
    volumes:
      - "/data/services/vaultwarden/vw-data:/data"
    env_file:
      - "/data/services/vaultwarden/.env"
    depends_on:
      - vaultwarden_database
  vaultwarden_database:
    restart: always
    image: postgres:14-alpine
    volumes:
      - "/data/databases/vaultwarden:/var/lib/postgresql/data"
    env_file:
      - "/data/services/vaultwarden/postgres.env"
  vaultwarden_backup:
    restart: on-failure
    image: uleenucks/vaultwarden-backup
    init: true
    depends_on:
      - vaultwarden
      - vaultwarden_database
    volumes:
      - "/data/services/vaultwarden/vw-data:/data"
      - "/data/services/vaultwarden/backup.env:/.env"
      - "/data/backups/vaultwarden/backups:/backups"
    env_file:
      - "/data/services/vaultwarden/.env"
      - "/data/services/vaultwarden/postgres.env"