Initial commit - forked from the corresponding Smartfront repositories.
commit 7803630c76
10 .gitignore vendored Normal file
@@ -0,0 +1,10 @@
# Ignore backup files
*.bak
*~
# Ignore lock files
~$*
.~lock.*
# Ignore documentation
*.doc
*.docx
*.odt
95 .templates/apache2/apache2.conf Normal file
@@ -0,0 +1,95 @@
# Apache2 configuration for a virtualhost proxied to a Docker service.
# Uses https://github.com/acmesh-official/acme.sh to manage SSL certificates.

<VirtualHost *:80 >
  ServerAdmin webmaster@$PAR_SERVERNAME
  ServerName $PAR_SERVERNAME
  # ServerAlias $PAR_SERVERNAMES
  DocumentRoot /var/www/html

  # Common log settings.
  ErrorLog $PAR_SERVICE/logs/web/error.log
  CustomLog $PAR_SERVICE/logs/web/access.log combined

  # Custom error messages.
  <IfModule mod_macro.c>
    <Macro try_other $response>
      ErrorDocument $response "<span style='font-size: x-large'>Sorry try <a href='http://$PAR_SERVERNAME/$PAR_LOCATION'>http://$PAR_SERVERNAME/$PAR_LOCATION</a> instead.</span>"
    </Macro>
    <Macro try_later $response>
      ErrorDocument $response "<span style='font-size: x-large'>Sorry something went wrong. Try again a bit later.<br>\
You may report this at <a href='mailto:webmaster@$PAR_SERVERNAME'>webmaster@$PAR_SERVERNAME</a>.</span>"
    </Macro>
  </IfModule>

  # Permanent redirect to https.
  <IfModule mod_rewrite.c>
    # RewriteEngine On
    # RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R=301,L]
  </IfModule>

  # No static service.
  # Sequence matters: http://httpd.apache.org/docs/2.4/sections.html#file-and-web
  # <Location />
  #   Require all denied
  #   # Custom error message.
  #   <IfModule mod_macro.c>
  #     Use try_other 403
  #     Use try_other 404
  #   </IfModule>
  # </Location>

  # Let's Encrypt (acme.sh) support.
  <Location /.well-known/>
    <IfModule mod_proxy.c>
      Require all granted
      ProxyPreserveHost On
      ProxyPass http://$PAR_ACMEHOST:$PAR_ACMEPORT/
      ProxyPassReverse http://$PAR_ACMEHOST:$PAR_ACMEPORT/
      # Custom error message.
      <IfModule mod_macro.c>
        Use try_later 500
        Use try_later 502
        Use try_later 503
        Use try_later 504
      </IfModule>
    </IfModule>
    <IfModule !mod_proxy.c>
      # Custom error message.
      <IfModule mod_macro.c>
        Use try_other 403
        Use try_other 404
      </IfModule>
    </IfModule>
  </Location>

  <Location /$PAR_LOCATION>
    <IfModule mod_proxy.c>
      Require all granted
      ProxyPreserveHost On
      ProxyPass http://$PAR_PROXYHOST:$PAR_PROXYPORT/$PAR_LOCATION
      ProxyPassReverse http://$PAR_PROXYHOST:$PAR_PROXYPORT/$PAR_LOCATION
      # Custom error message.
      <IfModule mod_macro.c>
        Use try_later 500
        Use try_later 502
        Use try_later 503
        Use try_later 504
      </IfModule>
    </IfModule>
    <IfModule !mod_proxy.c>
      # Custom error message.
      <IfModule mod_macro.c>
        Use try_later 403
        Use try_later 404
      </IfModule>
    </IfModule>
  </Location>

  # XSS protection
  <IfModule mod_headers.c>
    Header set X-Frame-Options SAMEORIGIN
    Header set X-Content-Type-Options nosniff
  </IfModule>

</VirtualHost>
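The $PAR_* placeholders above are clearly meant to be filled in when a concrete virtualhost file is generated from this template; the substitution step itself is not part of this commit. A minimal sketch, assuming GNU gettext's envsubst and made-up values (domain, service name, ports):

# Minimal sketch, not the repository's own deploy step; all values are illustrative.
export PAR_SERVERNAME=example.com PAR_SERVICE=$HOME/services/myservice PAR_LOCATION=app
export PAR_ACMEHOST=127.0.0.1 PAR_ACMEPORT=8402 PAR_PROXYHOST=127.0.0.1 PAR_PROXYPORT=8080
envsubst '$PAR_SERVERNAME $PAR_SERVICE $PAR_LOCATION $PAR_ACMEHOST $PAR_ACMEPORT $PAR_PROXYHOST $PAR_PROXYPORT' \
  < .templates/apache2/apache2.conf > "$HOME/services/.apache2/myservice.conf"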
111 .templates/apache2/apache2_ssl.conf Normal file
@@ -0,0 +1,111 @@
# Apache2 SSL configuration for a virtualhost proxied to a Docker service.
# Uses https://github.com/acmesh-official/acme.sh to manage SSL certificates.

<IfModule mod_ssl.c>
  <VirtualHost *:443 >
    ServerAdmin webmaster@$PAR_SERVERNAME
    ServerName $PAR_SERVERNAME
    # ServerAlias $PAR_SERVERNAMES
    DocumentRoot /var/www/html

    # Common log settings.
    ErrorLog $PAR_SERVICE/logs/web/error.log
    CustomLog $PAR_SERVICE/logs/web/access.log combined

    # Custom error messages.
    <IfModule mod_macro.c>
      <Macro try_other $response>
        ErrorDocument $response "<span style='font-size: x-large'>Sorry try <a href='http://$PAR_SERVERNAME/$PAR_LOCATION'>http://$PAR_SERVERNAME/$PAR_LOCATION</a> instead.</span>"
      </Macro>
      <Macro try_later $response>
        ErrorDocument $response "<span style='font-size: x-large'>Sorry something went wrong. Try again a bit later.<br>\
You may report this at <a href='mailto:webmaster@$PAR_SERVERNAME'>webmaster@$PAR_SERVERNAME</a>.</span>"
      </Macro>
    </IfModule>

    # No static service.
    # Sequence matters: http://httpd.apache.org/docs/2.4/sections.html#file-and-web
    # <Location />
    #   Require all denied
    #   # Custom error message.
    #   <IfModule mod_macro.c>
    #     Use try_other 403
    #     Use try_other 404
    #   </IfModule>
    # </Location>

    # Let's Encrypt (acme.sh) support.
    <Location /.well-known/>
      <IfModule mod_proxy.c>
        Require all granted
        ProxyPreserveHost On
        ProxyPass http://$PAR_ACMEHOST:$PAR_ACMEPORT/
        ProxyPassReverse http://$PAR_ACMEHOST:$PAR_ACMEPORT/
        # Custom error message.
        <IfModule mod_macro.c>
          Use try_later 500
          Use try_later 502
          Use try_later 503
          Use try_later 504
        </IfModule>
      </IfModule>
      <IfModule !mod_proxy.c>
        # Custom error message.
        <IfModule mod_macro.c>
          Use try_other 403
          Use try_other 404
        </IfModule>
      </IfModule>
    </Location>

    <Location /$PAR_LOCATION>
      <IfModule mod_proxy.c>
        Require all granted
        ProxyPreserveHost On
        ProxyPass http://$PAR_PROXYHOST:$PAR_PROXYPORT/$PAR_LOCATION
        ProxyPassReverse http://$PAR_PROXYHOST:$PAR_PROXYPORT/$PAR_LOCATION
        # Custom error message.
        <IfModule mod_macro.c>
          Use try_later 500
          Use try_later 502
          Use try_later 503
          Use try_later 504
        </IfModule>
      </IfModule>
      <IfModule !mod_proxy.c>
        # Custom error message.
        <IfModule mod_macro.c>
          Use try_later 403
          Use try_later 404
        </IfModule>
      </IfModule>
    </Location>

    ##################################################################################
    # The SSL part
    # https://ssl-config.mozilla.org/

    SSLEngine on
    SSLCertificateFile $PAR_SERVICE/configs/acme/$PAR_SERVERNAME/fullchain.cer
    SSLCertificateKeyFile $PAR_SERVICE/configs/acme/$PAR_SERVERNAME/$PAR_SERVERNAME.key

    # Settings to achieve 'A' grade on https://www.ssllabs.com/ssltest/
    SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1
    SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
    SSLHonorCipherOrder off
    SSLSessionTickets off

    # HTTP/2, if available.
    <IfModule mod_http2.c>
      Protocols h2 http/1.1
    </IfModule>

    # HTTP Strict Transport Security and XSS protection.
    <IfModule mod_headers.c>
      Header always set Strict-Transport-Security "max-age=63072000"
      Header set X-Frame-Options SAMEORIGIN
      Header set X-Content-Type-Options nosniff
    </IfModule>

  </VirtualHost>
</IfModule>
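The certificate paths above follow acme.sh's per-domain file layout, and the /.well-known/ proxy in both virtualhosts suggests the bundled acme.sh client answers HTTP-01 challenges on a local port. A possible issuance call, with the domain, port and certificate home as illustrative assumptions rather than values taken from this commit:

# Assumed flow only.
~/bin/acme.sh --issue --standalone --httpport 8402 -d example.com \
  --cert-home "$HOME/services/myservice/configs/acme"
# which leaves the files the template expects:
#   .../configs/acme/example.com/fullchain.cer and .../configs/acme/example.com/example.com.key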
2 .templates/apache2/sites-available/services.conf Normal file
@@ -0,0 +1,2 @@
# Includes the Docker services' configurations.
IncludeOptional $PAR_SERVICEBASE/.apache2/*.conf
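This include is presumably dropped into Apache's sites-available directory with $PAR_SERVICEBASE substituted. An assumed enabling sequence on a Debian-style Apache; the paths, the "deploy" home and the module list are illustrative, not taken from this commit:

sed 's|\$PAR_SERVICEBASE|/home/deploy/services|' \
  .templates/apache2/sites-available/services.conf \
  | sudo tee /etc/apache2/sites-available/services.conf
sudo a2enmod proxy proxy_http macro headers rewrite ssl http2
sudo a2ensite services
sudo apachectl configtest && sudo systemctl reload apache2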
176 .templates/bin/.launcher Executable file
@@ -0,0 +1,176 @@
#!/bin/bash
#
# Simple launcher script to start a worker script within a service or
# one for every service.
#
# Services are subfolders of the $SERVICE_BASE parameter set below.
# The worker script is an executable file within the service's $TOOLS_BASE
# folder. The launcher supposes that the filename of the worker script
# is the same as the launcher's.
#
# Without command line parameters the launcher enumerates all services
# and launches every service's worker script. Optionally it waits between
# launches if prescribed by the $SLEEP_BETWEEN parameter. This operation
# needs to be forced with the --doit command line option.
#
# With at least one command line parameter the launcher launches only one
# worker script, the one within the service named in the first parameter.
# If the supposed service or the worker script doesn't exist the script
# silently does nothing.
#
# Normally the launcher starts the worker(s) in the background, then exits.
# Using the --wait command line option you may wait for the worker(s)
# and get exit code 1 if any worker has reported some error.
#
# You may call this script from cron - in this case the CRON environment
# variable must be set in the crontab and exported to the script.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# mod: "instance" => "service"
# 2022-08-03 v0.6
# new: it optionally waits for the worker(s) and returns with exit code 1
#      if it has got at least one non-zero exit code from them.
# 2021-02-15 v0.5
# fix: omits the error message when there are no services at all
# 2021-02-05 v0.4
# fix: proper $PATH settings in Debian environment as well
# 2021-02-04 v0.3
# fix: decimal point trouble in non-English environments (e.g. Hungarian)
# 2021-01-05 v0.2
# fix: LANG=C and LC_ALL=C initialisations have been removed, because these may
#      interfere with UTF-8 file name encoding in Java calls:
#      https://ogris.de/howtos/java-utf8-filenames.html
# 2020-11-20 v0.1 Initial release

# Accepted environment variables and their defaults.
#
CRON=${CRON-""}                               # Does it run from cron?
SERVICE_BASE=${SERVICE_BASE-"$HOME/services"} # Services' folder path
SLEEP_BETWEEN=${SLEEP_BETWEEN-"0"}            # Secs between forks

# There is nothing to configure below (I hope).
###############################################

# Messages.
#
MSG_BADOPT="Invalid option"
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_USAGE="For one service: $($(which basename) "$0") [--wait] servicename\n"
MSG_USAGE+="For all services: $($(which basename) "$0") [--wait] --doit\n"
MSG_USAGE+="Environment variables:\n"
MSG_USAGE+=" SERVICE_BASE Absolute path to the folder containing services\n"
MSG_USAGE+=" (default: \$HOME/services)\n"
MSG_USAGE+=" SLEEP_BETWEEN Secs (may be fractional) to wait between multiple launches\n"
MSG_USAGE+=" (default: 0.0)"

# Getting command line options.
DOIT="" # Enables launching processes for all available services.
WAIT="" # Wait for the result of the launched process.
while getopts ":-:" option
do
  case ${option} in
    "-" )
      if [ "$OPTARG" = "doit" ]; then DOIT="yes"
      elif [ "$OPTARG" = "wait" ]; then WAIT="yes"
      else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
      fi
      ;;
    \? )
      echo "$MSG_BADOPT -$OPTARG" >&2; exit 1
      ;;
  esac
done; shift $((OPTIND -1))
# Done with options.

# Basic environment settings.
#
# Corrects the PATH if the operating system didn't load it yet;
# it is a bug with cron pam_env, I think.
if [ -n "$CRON" ]; then
  [[ -r "/etc/profile" ]] && source "/etc/profile"
  # Ubuntu gets the initial environment from a separate file.
  if [ -r "/etc/environment" ]; then
    # Extracts from this file, strips the right part w/o quotes.
    includepath=$(cat "/etc/environment" | $(which egrep) '^PATH=')
    includepath=${includepath:5}
    includepath="${includepath%\"}"; includepath="${includepath#\"}"
    [[ -n "$includepath" ]] && PATH="$PATH:$includepath"
    unset includepath
  fi
  # We need the $HOME/bin as well.
  PATH="$HOME/bin:$PATH"
fi
# We need the sbin directories as well.
if ! [[ "$PATH" =~ '/sbin:' ]]; then PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi

# Checks the dependencies.
#
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
#for item in basename df egrep head mail printf sleep
for item in basename printf sleep
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.

# Initializations and sanitizations.
#
if [ -z "$SERVICE_BASE" -o ! -d "$SERVICE_BASE" ]; then exit 1; fi
SLEEP_BETWEEN=$("$PRINTF" '%.2f' "$SLEEP_BETWEEN" 2>/dev/null) # To float
TOOLS_BASE="tools"               # Relative path
WORKERFILE="$("$BASENAME" "$0")" # Same filename

# Collects the service(s).
if [ -n "$1" ]; then
  # One service.
  SERVICES="$1/"; shift
elif [ -n "$DOIT" ]; then
  # All services, when forced.
  SERVICES="$(cd "$SERVICE_BASE"; ls -d */ 2>/dev/null)"
else
  echo -e "$MSG_USAGE" >&2; exit 1
fi

# Prepares to save the result codes.
declare -A JOBS
# Enumerates the service folders.
[[ -z "$SERVICES" ]] && exit
for service in $SERVICES ""
do
  # Safety first...
  if [ -n "$service" ]; then
    # Forks the worker if it exists and is executable.
    if [ -x "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" ]; then
      # Sets PATH and USER, passes all remaining command line parameters.
      [[ -n "$CRON" ]] && export PATH USER
      "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" "$@" &
      PID="$!"
      # Optionally waits for the worker and saves the result.
      if [ -n "$WAIT" ]
      then wait $PID; JOBS["$service"]=$?; fi
      # Optionally reduces the fork frequency.
      "$SLEEP" ${SLEEP_BETWEEN//,/.} # decimal point needed
    else
      # Optionally reports the failure.
      if [ -n "$WAIT" ]
      then JOBS["$service"]=1; fi
    fi
  fi
done

# Optionally returns with exit code 1 if it has got at least one
# non-zero exit code from the workers.
if [ -n "$WAIT" ]; then
  for key in "${!JOBS[@]}"
  do [[ "${JOBS[$key]}" -gt 0 ]] && exit 1
  done
fi

# That's all, Folks! :)
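Putting the launcher's own conventions together (a worker named like the launcher, living in each service's tools/ folder), a possible setup and invocation looks like this; the "backup" worker and "myservice" service are made-up names and the launcher is assumed to be installed under ~/bin:

ln -s .launcher "$HOME/bin/backup"                    # the launcher takes the worker's name
mkdir -p "$HOME/services/myservice/tools"
printf '#!/bin/bash\necho "backing up myservice"\n' > "$HOME/services/myservice/tools/backup"
chmod +x "$HOME/services/myservice/tools/backup"
"$HOME/bin/backup" --wait myservice                   # one service, wait for its exit code
SLEEP_BETWEEN=2.5 "$HOME/bin/backup" --doit           # every service, 2.5 s between forks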
7952 .templates/bin/acme.sh Executable file
File diff suppressed because it is too large
1 .templates/bin/build Symbolic link
@@ -0,0 +1 @@
.launcher
93 .templates/bin/copytruncate Executable file
@@ -0,0 +1,93 @@
#!/bin/bash
#
# A replacement of the logrotate's copytruncate method to use when:
# * we don't have permission to change ownership, so the built-in copytruncate
#   method would fail (this is a bug in logrotate, I think);
# * we don't have permission to reload the service, so the create-new-log method
#   doesn't work - the service would still write to the already rotated file.
#
# This script, when called as a prerotate script from the logrotate configuration:
# * copytruncates a file having the $LOGEXT (see below) extension and compresses it
#   (and returns with exit code 1, thus the logrotate will skip this file);
# * does nothing with files having any other extensions - e.g. .1, .2 and so on
#   (and returns with exit code 0, thus the logrotate can process this file).
#
# In other words, with default settings it simulates the effect of the logrotate
# configuration options below:
#   <pathname>/*.log {
#     dateext
#     dateyesterday
#     dateformat %Y-%m-%d.
#     extension log
#     compress
#     copytruncate
#     [...]
#   }
# but doesn't stop if it failed to set the permissions during copytruncate.
#
# The script receives the file to process as its first command line parameter.
# If the parameter is missing it simply does nothing.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# 2020-11-09 v0.1 Initial release

# Configurable parameters - must be consistent with the logrotate configuration.
#
DATESTRING="$(date "+%Y-%m-%d" -d '1 day ago')" # Yesterday goes to the filename
LOGEXT="log"                                    # Processes only *.$LOGEXT files

# Messages.
MSG_MISSINGDEP="Fatal: missing dependency"

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename cp dirname gzip
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.
#
# An additional bugfix (use "$(which gzip)" instead of "$GZIP"):
# https://www.gnu.org/software/gzip/manual/html_node/Environment.html
GZIP=""

# If the parameter is missing (unlikely) it simply does nothing.
# The script returns an OK status, so the logrotate will continue.
[[ -z "$1" ]] && exit 0
# Parses the parameter.
LOGDIR="$("$DIRNAME" $1)"
LOGFILE="$("$BASENAME" $1)"
NEWFILE="${LOGFILE%.*}.$DATESTRING.${LOGFILE##*.}"

# If the parameter doesn't point to a writeable file it simply does nothing.
[[ ! -w "$LOGDIR/$LOGFILE" ]] && exit 0
# If the log's folder isn't writable it simply does nothing.
[[ ! -w "$LOGDIR" ]] && exit 0
# If the extension doesn't match it simply does nothing.
[[ "${LOGFILE##*.}" != "$LOGEXT" ]] && exit 0
# The cases above must be handled by the logrotate itself.
# The script returns an OK status, so the logrotate will continue.

# Doesn't copy if it would overwrite something.
# Returns an error status, so the logrotate won't process this file.
[[ -e "$LOGDIR/$NEWFILE" ]] && exit 1
[[ -e "$LOGDIR/$NEWFILE.gz" ]] && exit 1
# Tries to copy the current logfile.
"$CP" -p "$LOGDIR/$LOGFILE" "$LOGDIR/$NEWFILE" # >/dev/null 2>&1
# On error returns an error status so the logrotate won't process this file.
[[ ! -r "$LOGDIR/$NEWFILE" ]] && exit 1
# Done with copy. Tries to empty the current logfile. Doesn't check the result.
: >"$LOGDIR/$LOGFILE"
# Compresses the rotated logfile. Doesn't check the result.
"$(which gzip)" "$LOGDIR/$NEWFILE" #>/dev/null 2>&1
# Returns a fake error status to prevent the logrotate from processing this file.
exit 1

# That's all, Folks! :)
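The script is meant to be wired in through logrotate's prerotate hook (logrotate passes the log file path as the script's first argument when sharedscripts is not set). The repository's real logrotate configuration is not part of this commit; a matching stanza could look roughly like this, with paths and schedule as assumptions:

/home/deploy/services/*/logs/web/*.log {
    weekly
    rotate 8
    missingok
    prerotate
        /home/deploy/bin/copytruncate "$1"
    endscript
}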
3 .templates/bin/mail Normal file
@@ -0,0 +1,3 @@
#!/bin/bash
#
# Dummy file to meet the dependencies. Does nothing.
148 .templates/bin/maintenance_daily Executable file
@@ -0,0 +1,148 @@
#!/bin/bash
#
# Maintenance operations, once a day.
# This script is usually called by cron - in this case the CRON variable
# must be set in the crontab and exported to the script.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# mod: "instance" => "service"
# 2023-02-12 v0.9
# new: shows the ZFS disk partitions (if any) as well.
# 2023-01-20 v0.8
# fix: doesn't try to reload non-existing or stopped web servers.
# 2021-10-08 v0.7
# fix: tries to reload the web server(s) to get the renewed SSL
#      certificates (if any) into effect.
# 2021-06-02 v0.6
# fix: more accurate egrep for df -h output.
# fix: typo => 98-fsck-at-rebooz
# 2021-02-15 v0.5
# fix: omits the error message when there are no services at all
# 2021-02-05 v0.4
# fix: proper $PATH settings in Debian environment as well
# 2021-02-04 v0.3
# fix: decimal point trouble in non-English environments (e.g. Hungarian)
# 2021-01-05 v0.2
# fix: LANG=C and LC_ALL=C initialisations have been removed, because these may
#      interfere with UTF-8 file name encoding in Java calls:
#      https://ogris.de/howtos/java-utf8-filenames.html
# fix: Missing message.
# 2020-11-12 v0.1 Initial release

# Accepted environment variables and their defaults.
#
CRON=${CRON-""}                               # Does it run from cron?
SERVICE_BASE=${SERVICE_BASE-"$HOME/services"} # Services' folder path
SLEEP_BETWEEN=${SLEEP_BETWEEN-"120"}          # Secs between forks

# There is nothing to configure below (I hope).
###############################################

# Messages.
#
MSG_MISSINGDEP="Fatal: missing dependency"

# Basic environment settings.
#
# Corrects the PATH if the operating system didn't load it yet;
# it is a bug with cron pam_env, I think.
if [ -n "$CRON" ]; then
  [[ -r "/etc/profile" ]] && source "/etc/profile"
  # Ubuntu gets the initial environment from a separate file.
  if [ -r "/etc/environment" ]; then
    # Extracts from this file, strips the right part w/o quotes.
    includepath=$(cat "/etc/environment" | $(which egrep) '^PATH=')
    includepath=${includepath:5}
    includepath="${includepath%\"}"; includepath="${includepath#\"}"
    [[ -n "$includepath" ]] && PATH="$PATH:$includepath"
    unset includepath
  fi
  # We need the $HOME/bin as well.
  PATH="$HOME/bin:$PATH"
fi
# We need the sbin directories as well.
if ! [[ "$PATH" =~ '/sbin:' ]]; then PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi

# Checks the dependencies.
#
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename cut df egrep head mail printf sleep
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.

# Checks and sanitizations.
#
if [ -z "$SERVICE_BASE" -o ! -d "$SERVICE_BASE" ]; then exit 1; fi
SLEEP_BETWEEN=$("$PRINTF" '%.2f' "$SLEEP_BETWEEN" 2>/dev/null) # To float

# Sends a mail report to the Linux user itself.
# Hopes the appropriate message forwarding rule has been set.
if [ -x "$MAIL" ]; then
  subject="[Maintenance] $HOSTNAME daily report"
  message="This is a report from $("$BASENAME" "$0") script on $HOSTNAME.\n\n"
  [[ -x "/etc/update-motd.d/00-header" ]] \
    && message+="$("/etc/update-motd.d/00-header")\n\n"
  message+="$("$DF" -h | "$HEAD" -n1)\n"
  message+="$("$DF" -h | "$EGREP" '^/dev/' | "$EGREP" -v 'loop')\n"
  if [ -n "$(which zpool)" -a -x "$(which zpool)" ]; then
    # Includes ZFS partitions (if any).
    for pool in $("$(which zpool)" list -H | "$CUT" -f1) "" #"
    do [[ -n "$pool" ]] && message+="$("$DF" -h | "$EGREP" "^$pool/")\n"; done
  fi
  [[ -x "/etc/update-motd.d/90-updates-available" ]] \
    && message+="$("/etc/update-motd.d/90-updates-available")\n"
  [[ -x "/etc/update-motd.d/98-fsck-at-reboot" ]] \
    && message+="$("/etc/update-motd.d/98-fsck-at-reboot")\n"
  [[ -x "/etc/update-motd.d/99-reboot-required" ]] \
    && message+="$("/etc/update-motd.d/99-reboot-required")\n"
  message+="\nBest regards: the Maintenance Bot"
  echo -e "$message" | "$MAIL" -s "$subject" "$USER"
fi
# Done with mail.

# Tries to reload the webserver(s) to get the renewed
# SSL certificates (if any) into effect.
if [ -n "$(which sudo)" -a -n "$(which systemctl)" ]; then
  for webserver in apache2 nginx
  do
    if [[ $( systemctl status $webserver >/dev/null 2>&1; echo $? ) -eq 0 ]]; then
      sudo -n systemctl reload $webserver >/dev/null 2>&1
    fi
  done
fi
# Done with the webserver(s).

# Launches the worker scripts with the same name, one for every service.
# Services are subfolders of the $SERVICE_BASE folder set above.
TOOLS_BASE="tools"               # Relative path to worker scripts
WORKERFILE="$("$BASENAME" "$0")" # Same filename
# Gets the service folders, gives up if none.
SERVICES="$(cd "$SERVICE_BASE"; ls -d */ 2>/dev/null)"
[[ -z "$SERVICES" ]] && exit
# Enumerates the service folders.
for service in $SERVICES ""
do
  # Safety first...
  if [ -n "$service" ]; then
    # Forks the worker if it exists and is executable.
    # Passes all remaining command line parameters.
    if [ -x "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" ]; then
      [[ -n "$CRON" ]] && export PATH USER
      "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" "$@" &
      # Optionally reduces the fork frequency.
      "$SLEEP" ${SLEEP_BETWEEN//,/.} # decimal point needed
    fi
  fi
done
# Done with workers.

# That's all, Folks! :)
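The "sudo -n systemctl reload ..." call above only works if the service user may reload the web servers without a password. That rule is not part of this commit; an assumed sudoers drop-in, where "deploy" is a hypothetical user and the systemctl path may differ per distribution:

echo 'deploy ALL=(root) NOPASSWD: /usr/bin/systemctl reload apache2, /usr/bin/systemctl reload nginx' \
  | sudo tee /etc/sudoers.d/deploy-webreload
sudo visudo -c -f /etc/sudoers.d/deploy-webreload   # syntax check before relying on it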
101 .templates/bin/maintenance_midnight Executable file
@@ -0,0 +1,101 @@
#!/bin/bash
#
# Maintenance operations at midnight.
# This script is usually called by cron - in this case the CRON variable
# must be set in the crontab and exported to the script.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# mod: "instance" => "service"
# 2021-02-15 v0.5
# fix: omits the error message when there are no services at all
# 2021-02-05 v0.4
# fix: proper $PATH settings in Debian environment as well
# 2021-02-04 v0.3
# fix: decimal point trouble in non-English environments (e.g. Hungarian)
# 2021-01-05 v0.2
# fix: LANG=C and LC_ALL=C initialisations have been removed, because these may
#      interfere with UTF-8 file name encoding in Java calls:
#      https://ogris.de/howtos/java-utf8-filenames.html
# fix: Missing message.
# 2020-11-12 v0.1 Initial release

# Accepted environment variables and their defaults.
#
CRON=${CRON-""}                               # Does it run from cron?
SERVICE_BASE=${SERVICE_BASE-"$HOME/services"} # Services' folder path
SLEEP_BETWEEN=${SLEEP_BETWEEN-"1"}            # Secs between forks

# There is nothing to configure below (I hope).
###############################################

# Messages.
#
MSG_MISSINGDEP="Fatal: missing dependency"

# Basic environment settings.
#
# Corrects the PATH if the operating system didn't load it yet;
# it is a bug with cron pam_env, I think.
if [ -n "$CRON" ]; then
  [[ -r "/etc/profile" ]] && source "/etc/profile"
  # Ubuntu gets the initial environment from a separate file.
  if [ -r "/etc/environment" ]; then
    # Extracts from this file, strips the right part w/o quotes.
    includepath=$(cat "/etc/environment" | $(which egrep) '^PATH=')
    includepath=${includepath:5}
    includepath="${includepath%\"}"; includepath="${includepath#\"}"
    [[ -n "$includepath" ]] && PATH="$PATH:$includepath"
    unset includepath
  fi
  # We need the $HOME/bin as well.
  PATH="$HOME/bin:$PATH"
fi
# We need the sbin directories as well.
if ! [[ "$PATH" =~ '/sbin:' ]]; then PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi

# Checks the dependencies.
#
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename printf sleep
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.

# Checks and sanitizations.
#
if [ -z "$SERVICE_BASE" -o ! -d "$SERVICE_BASE" ]; then exit 1; fi
SLEEP_BETWEEN=$("$PRINTF" '%.2f' "$SLEEP_BETWEEN" 2>/dev/null) # To float

# Launches the worker scripts with the same name, one for every service.
# Services are subfolders of the $SERVICE_BASE folder set above.
TOOLS_BASE="tools"               # Relative path to worker scripts
WORKERFILE="$("$BASENAME" "$0")" # Same filename
# Gets the service folders, gives up if none.
SERVICES="$(cd "$SERVICE_BASE"; ls -d */ 2>/dev/null)"
[[ -z "$SERVICES" ]] && exit
# Enumerates the service folders.
for service in $SERVICES ""
do
  # Safety first...
  if [ -n "$service" ]; then
    # Forks the worker if it exists and is executable.
    # Passes all remaining command line parameters.
    if [ -x "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" ]; then
      [[ -n "$CRON" ]] && export PATH USER
      "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" "$@" &
      # Optionally reduces the fork frequency.
      "$SLEEP" ${SLEEP_BETWEEN//,/.} # decimal point needed
    fi
  fi
done
# Done with workers.

# That's all, Folks! :)
112 .templates/bin/maintenance_reboot Executable file
@@ -0,0 +1,112 @@
#!/bin/bash
#
# Maintenance operations at reboot.
# This script is usually called by cron - in this case the CRON variable
# must be set in the crontab and exported to the script.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# mod: "instance" => "service"
# 2021-02-15 v0.5
# fix: omits the error message when there are no services at all
# 2021-02-05 v0.4
# fix: proper $PATH settings in Debian environment as well
# 2021-02-04 v0.3
# fix: decimal point trouble in non-English environments (e.g. Hungarian)
# 2021-01-05 v0.2
# fix: LANG=C and LC_ALL=C initialisations have been removed, because these may
#      interfere with UTF-8 file name encoding in Java calls:
#      https://ogris.de/howtos/java-utf8-filenames.html
# fix: Missing message.
# 2020-11-12 v0.1 Initial release

# Accepted environment variables and their defaults.
#
CRON=${CRON-""}                               # Does it run from cron?
SERVICE_BASE=${SERVICE_BASE-"$HOME/services"} # Services' folder path
SLEEP_BETWEEN=${SLEEP_BETWEEN-"60"}           # Secs between forks

# There is nothing to configure below (I hope).
###############################################

# Messages.
#
MSG_MISSINGDEP="Fatal: missing dependency"

# Basic environment settings.
#
# Corrects the PATH if the operating system didn't load it yet;
# it is a bug with cron pam_env, I think.
if [ -n "$CRON" ]; then
  [[ -r "/etc/profile" ]] && source "/etc/profile"
  # Ubuntu gets the initial environment from a separate file.
  if [ -r "/etc/environment" ]; then
    # Extracts from this file, strips the right part w/o quotes.
    includepath=$(cat "/etc/environment" | $(which egrep) '^PATH=')
    includepath=${includepath:5}
    includepath="${includepath%\"}"; includepath="${includepath#\"}"
    [[ -n "$includepath" ]] && PATH="$PATH:$includepath"
    unset includepath
  fi
  # We need the $HOME/bin as well.
  PATH="$HOME/bin:$PATH"
fi
# We need the sbin directories as well.
if ! [[ "$PATH" =~ '/sbin:' ]]; then PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi

# Checks the dependencies.
#
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename mail printf sleep
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.

# Checks and sanitizations.
#
if [ -z "$SERVICE_BASE" -o ! -d "$SERVICE_BASE" ]; then exit 1; fi
SLEEP_BETWEEN=$("$PRINTF" '%.2f' "$SLEEP_BETWEEN" 2>/dev/null) # To float

# Sends a mail message to the Linux user itself.
# Hopes the appropriate message forwarding rule has been set.
if [ -x "$MAIL" ]; then
  subject="[Maintenance] $HOSTNAME has been rebooted!"
  message="This is a warning from $("$BASENAME" "$0") script on $HOSTNAME.\n"
  message+="The machine has been rebooted a few minutes ago.\n\n"
  message+="Best regards: the Maintenance Bot"
  echo -e "$message" | "$MAIL" -s "$subject" "$USER"
fi
# Done with mail.

# Launches the worker scripts with the same name, one for every service.
# Services are subfolders of the $SERVICE_BASE folder set above.
TOOLS_BASE="tools"               # Relative path to worker scripts
WORKERFILE="$("$BASENAME" "$0")" # Same filename
# Gets the service folders, gives up if none.
SERVICES="$(cd "$SERVICE_BASE"; ls -d */ 2>/dev/null)"
[[ -z "$SERVICES" ]] && exit
# Enumerates the service folders.
for service in $SERVICES ""
do
  # Safety first...
  if [ -n "$service" ]; then
    # Forks the worker if it exists and is executable.
    # Passes all remaining command line parameters.
    if [ -x "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" ]; then
      [[ -n "$CRON" ]] && export PATH USER
      "$SERVICE_BASE/$service$TOOLS_BASE/$WORKERFILE" "$@" &
      # Optionally reduces the fork frequency.
      "$SLEEP" ${SLEEP_BETWEEN//,/.} # decimal point needed
    fi
  fi
done
# Done with workers.

# That's all, Folks! :)
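All three maintenance scripts (and the .launcher-based workers) expect to be started by cron with the CRON variable exported. The actual crontab is not included in this commit; an assumed wiring, with illustrative schedules and the scripts assumed to be installed under ~/bin:

CRON=1
@reboot      $HOME/bin/maintenance_reboot
5 0 * * *    $HOME/bin/maintenance_midnight
30 6 * * *   $HOME/bin/maintenance_daily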
299 .templates/bin/mysql_dumpdb Executable file
@@ -0,0 +1,299 @@
#! /bin/bash
#
# Dumps a MySQL database from a native or dockerized MySQL instance running
# on this box. This is a wrapper script to the mysqldump tool.
#
# If MySQL is dockerized you need to call it as a Docker manager user
# (member of the docker Linux group).
#
# Accepts a few mysqldump options as well as the optional database password
# and the optional output pathname:
#
# $0 [-u dbuser] [-P dbpass] [-h dbhost] [-p dbport]
#    [-C container] [-d database] [-f dumpfile ] [--compress] [--force]
#    [database (if not in -d)] [dumpfile (if not in -f)]
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "SMARTERP_skeleton" repository.
# 2023-02-15 v0.3
# fix: Some updates to MySQL messed with our mysqldump settings.
#      https://dev.mysql.com/doc/relnotes/mysql/5.7/en/news-5-7-31.html#mysqld-5-7-31-security
#      --no-tablespaces (the recommended option) has been added to fix.
#      https://dev.mysql.com/doc/refman/5.7/en/flush.html#flush-tables-with-read-lock
#      --single-transaction=false has been added as a temporary workaround.
# 2020-11-12 v0.2
# fix: "$(which gzip)" instead of "$GZIP", see also:
#      https://www.gnu.org/software/gzip/manual/html_node/Environment.html
# mod: Accepts a dump folder name as well, instead of a dump file name.
# 2020-09-17 v0.1 Initial release

# Accepted environment variables and their defaults.
#
MYCONTAINER=${MYCONTAINER-""}   # Docker container's name
MYDATABASE=${MYDATABASE-""}     # Database name to dump
MYDUMP=${MYDUMP-""}             # Dump file pathname
MYHOST=${MYHOST:-"localhost"}   # Connection parameter
MYOPTIONS=${MYOPTIONS-""}       # Options to pass to mysqldump
MYPASSWORD=${MYPASSWORD-""}     # Credential for the MySQL user
MYPORT=${MYPORT:-"3306"}        # Connection parameter
MYUSER=${MYUSER:-"root"}        # MySQL user for this dump

### Temporarily ignored! Needs to be sanitized.
MYOPTIONS=""

# Other initialisations.
#
MYDUMPFORCED="" # Dumps despite failed checks
# Our default parameters for the mysqldump.
# Content of the MYOPTIONS will also be appended during the actual dump.
dumpparameters="--comments --events --routines --triggers "
dumpparameters+="--complete-insert --dump-date --force --no-create-db "
dumpparameters+="--opt --single-transaction "
## https://dev.mysql.com/doc/relnotes/mysql/5.7/en/news-5-7-31.html#mysqld-5-7-31-security
dumpparameters+="--no-tablespaces "
## https://dev.mysql.com/doc/relnotes/mysql/5.7/en/news-5-7-41.html
## a temporary workaround only
dumpparameters+="--single-transaction=false "
# Technical databases which are never dumped.
vetodatabases="information_schema mysql performance_schema sys"

# Messages.
#
MSG_ABORTED="aborted"
MSG_BADCRED="Bad credentials for MySQL"
MSG_BADOPT="Invalid option"
MSG_DOESNOTRUN="Doesn't run the database container"
MSG_DOCKERGRPNEED="You must be a member of the docker group."
MSG_FAILSIZE="Failed to size the database"
MSG_FORCED="but forced to continue"
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGDB="Missing database"
MSG_NONWRITE="The target directory isn't writable"
MSG_NOSPACE="Not enough space to dump the database"
MSG_USAGE="Usage: $0 [options] [database [dump_pathname|-]]\n"
MSG_USAGE+="Option:\tENVVAR:\n"
MSG_USAGE+=" -C\tMYCONTAINER\tMySQL Docker container's name\n"
MSG_USAGE+=" -d\tMYDATABASE\tMySQL database to dump\n"
MSG_USAGE+=" -f\tMYDUMP\t\tDumpfile pathname\n"
MSG_USAGE+=" -h\tMYHOST\t\tHostname or IP to connect (localhost)\n"
MSG_USAGE+=" -p\tMYPORT\t\tTCP port to connect (3306)\n"
MSG_USAGE+=" -P\tMYPASSWORD\tMySQL password\n"
MSG_USAGE+=" -u\tMYUSER\t\tMySQL username (root)\n"
MSG_USAGE+="--compress\t\tCompresses with gzip\n"
MSG_USAGE+="--force\t\t\tForces the operation despite the failed checks\n"

# Getting options.
#
while getopts ":-:c:C:d:D:f:F:h:H:p:P:u:U:" option
do
  case ${option} in
    "-" )
      if [ "$OPTARG" = "compress" ]; then compress="yes"
      elif [ "$OPTARG" = "force" ]; then MYDUMPFORCED="yes"
      elif [ "$OPTARG" = "help" ]; then echo -e "$MSG_USAGE" >&2; exit
      else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
      fi
      ;;
    "c" | "C" )
      MYCONTAINER="$OPTARG"
      ;;
    "d" | "D" )
      MYDATABASE="$OPTARG"
      ;;
    "f" | "F" )
      MYDUMP="$OPTARG"
      ;;
    "h" | "H" )
      MYHOST="$OPTARG"
      ;;
    "P" )
      MYPASSWORD="$OPTARG"
      ;;
    "p" )
      MYPORT="$OPTARG"
      ;;
    "u" | "U" )
      MYUSER="$OPTARG"
      ;;
    \? )
      echo "$MSG_BADOPT -$OPTARG" >&2; exit 1
      ;;
  esac
done; shift $((OPTIND -1))
# All options are processed.

# Checks the dependencies.
#
# Conditional dependencies (according to native or dockerized environment).
[[ -z "$MYCONTAINER" ]] \
  && additem="mysql mysqldump" \
  || additem="docker"
# Common dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in cat cut date df dirname grep gzip hostname id pwd sed tail tee $additem
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via the "$THECOMMAND" (upper case) call.
#
# An additional bugfix (use "$(which gzip)" instead of "$GZIP"):
# https://www.gnu.org/software/gzip/manual/html_node/Environment.html
GZIP=""

# Need to be root or a Docker manager user if the DB runs in a container.
#
[[ -n "$MYCONTAINER" ]] && [[ "$USER" != 'root' ]] \
  && [[ -z "$(echo "$("$ID" -Gn "$USER") " | "$GREP" ' docker ')" ]] \
  && echo "$MSG_DOCKERGRPNEED" >&2 && exit 1 #"

# If MySQL is dockerized the container must be running.
#
[[ -n "$MYCONTAINER" ]] \
  && [[ -z "$("$DOCKER" ps -q -f name=$MYCONTAINER)" ]] \
  && echo "$MSG_DOESNOTRUN $MYCONTAINER" >&2 && exit 1

# Determines the (mandatory) database to dump.
#
# Without -d, the 1st non-option parameter is the database's name.
if [ -z "$MYDATABASE" -a -n "$1" ]; then MYDATABASE="$1"; shift; fi
if [ -z "$MYDATABASE" ]; then echo -e "$MSG_USAGE" >&2; exit 1; fi
# A humble sanitization.
if [[ ! "$MYDATABASE" =~ ^([[:alnum:]]|[_])*$ ]]; then
  echo -e "$MSG_USAGE" >&2; exit 1; fi
# Silently refuses MySQL internal databases.
for veto in $vetodatabases ""
do
  [[ "$MYDATABASE" = "$veto" ]] && exit 0
done
# We've a database name to dump.

# Optional backup file pathname, defaults to ./hostname.timestamp.MYDATABASE.sql
#
if [ -z "$MYDUMP" -a -n "$1" ]; then MYDUMP="$1"; shift; fi
if [ -d "$MYDUMP" ]; then
  MYDUMP+="/$MYDATABASE.$("$DATE" '+%Y%m%d_%H%M%S').$("$HOSTNAME").sql"
fi
if [ -z "$MYDUMP" ]; then
  MYDUMP="$PWD/$MYDATABASE.$("$DATE" '+%Y%m%d_%H%M%S').$("$HOSTNAME").sql"
fi
if [ "$MYDUMP" = "-" ]; then
  # If '-' was given as the MYDUMP, we need output to STDOUT.
  MYDUMP=""
  logfile="/dev/null"
else
  # Adds the relevant extension to the MYDUMP and the logfile.
  MYDUMP="${MYDUMP%.sql}.sql"
  logfile="${MYDUMP%.sql}.log"
fi
# The folder to contain the new files must be writable.
[[ -n "$MYDUMP" ]] && [[ ! -w "$("$DIRNAME" "$MYDUMP")" ]] \
  && echo "$MSG_NONWRITE \"$("$DIRNAME" "$MYDUMP")\"" >&2 && exit 1

# Prepopulates the MySQL commands.
#
my_connect=""
[[ -n "$MYHOST" ]] && my_connect+=" --host=$MYHOST"
[[ -n "$MYPORT" ]] && my_connect+=" --port=$MYPORT"
[[ -n "$MYUSER" ]] && my_connect+=" --user=$MYUSER"
[[ -n "$MYPASSWORD" ]] && my_connect+=" --password=$MYPASSWORD"

# Checks credentials and existence of the database given.
#
[[ -n "$MYCONTAINER" ]] \
  && databases=$("$DOCKER" exec $MYCONTAINER sh -c "mysql -N --batch $my_connect --execute='show databases;'" 2>/dev/null) \
  || databases=$("$MYSQL" -N --batch $my_connect --execute='show databases;' 2>/dev/null )
# Credentials?
[[ -z "$databases" ]] \
  && echo "$MSG_BADCRED ($MYUSER@$([[ -n "$MYCONTAINER" ]] && echo "$MYCONTAINER:")$MYHOST)." >&2 && exit 1
# Existence?
[[ ! "$databases" =~ (^|[[:space:]])"$MYDATABASE"($|[[:space:]]) ]] \
  && echo "$MSG_MISSINGDB \"$MYDATABASE\"." >&2 && exit 1
# We've the database connection and existence checked.

# Do we size the database?
#
dbsize=0
# It isn't relevant when we'll dump to the STDOUT.
if [ -n "$MYDUMP" ]; then
  # Calculates the size of the database (KB).
  SQLVERB='SELECT table_schema, '
  SQLVERB+='ROUND(SUM(data_length + index_length) / 1024, 0) '
  SQLVERB+="FROM information_schema.TABLES WHERE table_schema='$MYDATABASE' "
  SQLVERB+="GROUP BY table_schema;"
  if [ -n "$MYCONTAINER" ]; then
    # Dockerized database.
    dbsize=$("$DOCKER" exec $MYCONTAINER sh -c "echo \"$SQLVERB\" | mysql -N --batch $my_connect" 2>/dev/null | \
      "$CUT" -d$'\t' -f2)
  else
    # Self-hosted database.
    dbsize=$("$MYSQL" -N --batch $my_connect --execute="$SQLVERB" 2>/dev/null | \
      "$CUT" -d$'\t' -f2)
  fi
  # Some sanitization
  dbsize="${dbsize//[[:space:]]/}"
  [[ -z "$dbsize" ]] && dbsize=0
  [[ ! "$dbsize" =~ ^([[:digit:]])*$ ]] && dbsize=0
  # On failure aborts here, unless forced.
  if [ $dbsize -eq 0 ]; then echo -en "$MSG_FAILSIZE" | "$TEE" -a "$logfile"
    if [ "$MYDUMPFORCED" ]; then
      echo " - $MSG_FORCED" | "$TEE" -a "$logfile"
    else
      echo " - $MSG_ABORTED" | "$TEE" -a "$logfile"; exit 1
    fi
  fi
fi
# We've the database size.

# Checks the storage space available.
# Note, that we'll compare the size of the running database, not the dump!
# TODO: find a better estimate.
#
# It isn't relevant when we'll dump to the STDOUT or the database has no size.
if [ -n "$MYDUMP" -a $dbsize -gt 0 ]; then
  # KB units
  freespace=$("$DF" --output=avail -k "$("$DIRNAME" "$MYDUMP")" | $TAIL -n1) #"
  # Is it enough?
  if [ $freespace -lt $dbsize ]; then echo -en "$MSG_NOSPACE" | "$TEE" -a "$logfile"
    # On failure aborts here, unless forced.
    if [ "$MYDUMPFORCED" ]; then
      echo " - $MSG_FORCED" | "$TEE" -a "$logfile"
    else
      echo " - $MSG_ABORTED" | "$TEE" -a "$logfile"; exit 1
    fi
  fi
fi
# We've the space checked.

# Some cleanup.
#
[[ -n "$MYDUMP" && -f "$MYDUMP" ]] && rm "$MYDUMP" >/dev/null
[[ -n "$logfile" && -f "$logfile" ]] && rm "$logfile" >/dev/null
#
# Dumping.
#
if [ -n "$MYDUMP" ]; then
  # Dumps into a file (then optionally compresses). Writes a separate log too.
  # TODO: pipelined compress - doesn't work with Docker yet(?).
  [[ -n "$MYCONTAINER" ]] \
    && "$DOCKER" exec $MYCONTAINER sh -c "mysqldump $my_connect $dumpparameters $MYOPTIONS $MYDATABASE" \
      >"$MYDUMP" 2>>"$logfile" \
    || "$MYSQLDUMP" $my_connect $dumpparameters $MYOPTIONS $MYDATABASE \
      >"$MYDUMP" 2>>"$logfile"
  # Optional compression.
  [[ -n "$compress" ]] && "$(which gzip)" "$MYDUMP" 2>/dev/null
else
  # Dumps to STDOUT without logging.
  [[ -n "$MYCONTAINER" ]] \
    && "$DOCKER" exec $MYCONTAINER sh -c "mysqldump $my_connect $dumpparameters $MYOPTIONS $MYDATABASE" \
      2>/dev/null \
    || "$MYSQLDUMP" $my_connect $dumpparameters $MYOPTIONS $MYDATABASE \
      2>>/dev/null
fi

# That's all, Folks! :)
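Two illustrative invocations of the dump wrapper, based on the option handling above; the container, credentials and database names are made up and the script is assumed to be installed as ~/bin/mysql_dumpdb:

MYCONTAINER=mariadb MYPASSWORD=secret ~/bin/mysql_dumpdb -d shopdb -f /var/backups/db/ --compress
~/bin/mysql_dumpdb -C mariadb -P secret -u root shopdb - | gzip > shopdb.sql.gz   # dump to STDOUT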
526 .templates/bin/mysql_restoredb Executable file
@@ -0,0 +1,526 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Restores a MySQL/MariaDB database from a native or dockerized RDBMS instance
|
||||||
|
# accessible from this box. Also creates an owner user for this database
|
||||||
|
# (if it doesn't exist) and grants the appropriate privileges.
|
||||||
|
#
|
||||||
|
# Needs MySQL v5.7.6 or MariaDB 10.1.3 (or later).
|
||||||
|
# To restore a database with the necessary user management and grants,
|
||||||
|
# needs the superuser privileges on RDBMS.
|
||||||
|
# * If the RDBMS runs dockerized you need call this script as a Docker manager
|
||||||
|
# user (member of the docker Linux group).
|
||||||
|
# * If we're using a native MySQL/MariaDB, you need call this script as a
|
||||||
|
# Linux user whom the superuser role has been already granted within RDBMS
|
||||||
|
# (via unix_socket authentication) or you need provide the superuser's
|
||||||
|
# credentials as well.
|
||||||
|
# Lack of this the script will skip the user management and grant steps.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# $0 [-U dbuser] [-P dbpass] [-h dbhost] [-p dbport]
|
||||||
|
# [-A dbadminuser] [-a dbadminpass] [-c characterset]
|
||||||
|
# [-C container] [-d database] [-f dumpfile ]
|
||||||
|
# [database (if not in -d)] [dumpfile (if not in -f)]
|
||||||
|
#
|
||||||
|
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
|
||||||
|
# Kovács Zoltán <kovacsz@marcusconsulting.hu>
|
||||||
|
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||||
|
# 2023-06-18 v1.0
|
||||||
|
# new: forked from the "SMARTERP_skeleton" repository.
|
||||||
|
# 2022-04-07 v0.4
|
||||||
|
# new: An option and a guess mechanism has added to set the default character set
|
||||||
|
# of the restored database.
|
||||||
|
# 2021-08-30 v0.3
|
||||||
|
# fix: Uses the defaults when MYDBA* variables aren't set.
|
||||||
|
# 2021-03-22 v0.2
|
||||||
|
# fix: A duplicated SQLVERB has blocked setting password for a newly created user.
|
||||||
|
# The unnecessary PASSWORD() call has been removed as well.
|
||||||
|
# fix: Typos.
|
||||||
|
# 2021-02-18 v0.1 Initial release
|
||||||
|
|
||||||
|
# Accepted environment variables and their defaults.
|
||||||
|
#
|
||||||
|
MYCONTAINER=${MYCONTAINER-""} # Docker container's name
|
||||||
|
MYCHARSET=${MYCHARSET-""} # Default character set for DB
|
||||||
|
MYDATABASE=${MYDATABASE-""} # Database name to restore
|
||||||
|
MYDBAUSER=${MYDBAUSER:-""} # Database admin superuser
|
||||||
|
MYDBAPASSWORD=${MYDBAPASSWORD:-""} # Credential for the DBA user
|
||||||
|
MYDUMP=${MYDUMP-""} # Dump file pathname
|
||||||
|
MYHOST=${MYHOST:-"localhost"} # Connection parameter
|
||||||
|
MYOPTIONS=${MYOPTIONS-""} # Options to pass to pg_dump
|
||||||
|
MYPASSWORD=${MYPASSWORD-""} # Credential for the DB owner
|
||||||
|
MYPORT=${MYPORT:-"3306"} # Connection parameter
|
||||||
|
MYUSER=${MYUSER:-"root"} # Owner of the restored DB
|
||||||
|
|
||||||
|
### Temporailly ignored! Need to sanitize.
|
||||||
|
MYOPTIONS=""
|
||||||
|
|
||||||
|
# Basic environment settings.
|
||||||
|
#
|
||||||
|
LANG=C
|
||||||
|
LC_ALL=C
|
||||||
|
# We also need the sbin directories.
|
||||||
|
if ! [[ "$PATH" =~ '/sbin:' ]]; then
|
||||||
|
PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi
|
||||||
|
|
||||||
|
# Other initialisations.
|
||||||
|
#
|
||||||
|
LOGSTAMP="\"\$DATE\" +%Y-%m-%d\ %H:%M:%S" # Timestamp format for logs
|
||||||
|
MARIADBMIN="010001003" # MariaDB minimum version
|
||||||
|
MYSQLMIN="005007006" # MySQL minimum version
|
||||||
|
vetodatabases="information_schema mysql performance_schema sys"
|
||||||
|
|
||||||
|
# Messages.
|
||||||
|
#
|
||||||
|
MSG_AMBIGOUS="The character set used within the dump is ambiguous."
|
||||||
|
MSG_BADDBTYPE="Unknown database type"
|
||||||
|
MSG_BADDUMP="Doesn't exist or isn't a dumpfile:"
|
||||||
|
MSG_BADOPT="Invalid option"
|
||||||
|
MSG_BADPARAM="Doubtful parameter:"
|
||||||
|
MSG_BLOCKING="This is a fatal error - restore has been aborted."
|
||||||
|
MSG_CONNTERM="DB connection(s) have been forced to terminate"
|
||||||
|
MSG_DOCKERGRPNEED="You must be a member of the docker group."
|
||||||
|
MSG_DOESNOTRUN="The database container isn't running:"
|
||||||
|
MSG_EXISTING="did not create existing object"
|
||||||
|
MSG_FAILCONN="Failed to connect to the RDBMS."
|
||||||
|
MSG_FAILGRANT="Failed to grant privileges to user"
|
||||||
|
MSG_FAILKILL="Failed to kill active connection"
|
||||||
|
MSG_FAILPASS="Failed to set password to user"
|
||||||
|
MSG_FAILTOKILL="Failed to retrieve the active connections."
|
||||||
|
MSG_FAILVER="Failed to get the RDBMS version."
|
||||||
|
MSG_FAILUSER="Failed to create RDBMS user"
|
||||||
|
MSG_MISSINGDEP="Fatal: missing dependency"
|
||||||
|
MSG_NONBLOCKING="Recoverable error - restore is continuing."
|
||||||
|
MSG_NONSUPER="DB user doesn't have DBA (database superuser) privileges."
|
||||||
|
MSG_NONZERO="The result code is non-zero"
|
||||||
|
MSG_OLDRDBMS="RDBMS version is too old"
|
||||||
|
MSG_PERCENT="Hint: you may use percent-encoding (e.g. %40 instead of @)"
|
||||||
|
MSG_SUPERNEED="user must have DBA (database superuser) privileges."
|
||||||
|
|
||||||
|
MSG_USAGE="Usage: $0 [options] [database [dump_pathname]]\n"
|
||||||
|
MSG_USAGE+="Option:\tENVVAR:\n"
|
||||||
|
MSG_USAGE+=" -A\tMYDBAUSER \tMySQL/MariaDB DB admin superuser\n"
|
||||||
|
MSG_USAGE+=" -a\tMYDBAPASSWORD \tMySQL/MariaDB DB admin password\n"
|
||||||
|
MSG_USAGE+=" -c\tMYCHARSET \tMySQL/MariaDB DB character set\n"
|
||||||
|
MSG_USAGE+=" -C\tMYCONTAINER \tMySQL/MariaDB Docker container's name\n"
|
||||||
|
MSG_USAGE+=" -d\tMYDATABASE \tMySQL/MariaDB database to restore\n"
|
||||||
|
MSG_USAGE+=" -f\tMYDUMPFILE \tDumpfile pathname\n"
|
||||||
|
MSG_USAGE+=" -h\tMYHOST \tHostname or IP to connect (localhost)\n"
|
||||||
|
MSG_USAGE+=" -p\tMYPORT \tTCP port to connect (3306)\n"
|
||||||
|
MSG_USAGE+=" -P\tMYPASSWORD \tMySQL/MariaDB password\n"
|
||||||
|
MSG_USAGE+=" -U\tMYUSER \tMySQL/MariaDB username ($MYUSER)\n"
|
||||||
|
|
||||||
|
# Getting options.
|
||||||
|
#
|
||||||
|
while getopts ":-:a:A:c:C:d:D:f:h:H:p:P:u:U:" option
|
||||||
|
do
|
||||||
|
case ${option} in
|
||||||
|
"-" )
|
||||||
|
if [ "$OPTARG" = "help" ]; then echo -e "$MSG_USAGE" >&2; exit
|
||||||
|
else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
"A" ) MYDBAUSER="$OPTARG" ;;
|
||||||
|
"a" ) MYDBAPASSWORD="$OPTARG" ;;
|
||||||
|
"c" ) MYCHARSET="$OPTARG" ;;
|
||||||
|
"C" ) MYCONTAINER="$OPTARG" ;;
|
||||||
|
"d" | "D" ) MYDATABASE="$OPTARG" ;;
|
||||||
|
"f" ) MYDUMPFILE="$OPTARG" ;;
|
||||||
|
"h" | "H" ) MYHOST="$OPTARG" ;;
|
||||||
|
"P" ) MYPASSWORD="$OPTARG" ;;
|
||||||
|
"p" ) MYPORT="$OPTARG" ;;
|
||||||
|
"u" | "U" ) MYUSER="$OPTARG" ;;
|
||||||
|
\? ) echo "$MSG_BADOPT -$OPTARG" >&2; exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done; shift $((OPTIND -1))
|
||||||
|
# All options have been processed.
|
||||||
|
|
||||||
|
# Checks the dependencies.
|
||||||
|
#
|
||||||
|
# Conditional dependencies (according to native or dockerized environment).
|
||||||
|
[[ -z "$MYCONTAINER" ]] \
|
||||||
|
&& additem="mysql" \
|
||||||
|
|| additem="docker"
|
||||||
|
# Common dependencies.
|
||||||
|
TR=$(which tr 2>/dev/null)
|
||||||
|
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
|
||||||
|
for item in basename cat cut date dirname file grep gunzip head id locale \
|
||||||
|
readlink printf sed sort tail tee wc $additem
|
||||||
|
do
|
||||||
|
if [ -n "$(which $item)" ]
|
||||||
|
then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
|
||||||
|
else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
|
||||||
|
done
|
||||||
|
# All dependencies are available via "$THECOMMAND" (upper case) call.
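# For example (illustrative): if "which sed" resolves to /usr/bin/sed, the loop
# above exports SED=/usr/bin/sed, so later code calls "$SED" instead of sed.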
|
||||||
|
|
||||||
|
# Sanitizing the parameters.
|
||||||
|
# Most of them are only arbitrary restrictions (reliable source: TODO!)
|
||||||
|
#
|
||||||
|
[[ -n "$MYDBAUSER" ]] && [[ ! "$MYDBAUSER" =~ ^([[:alnum:]]|[.-_\\+])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYDBAUSER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYDBAPASSWORD" ]] && [[ ! "$MYDBAPASSWORD" =~ ^([[:alnum:]]|[ !~&#$<>()%+-_.])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYDBAPASSWORD\n$MSG_PERCENT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYCONTAINER" ]] && [[ ! "$MYCONTAINER" =~ ^([[:alnum:]]|[-_])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYCONTAINER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYDATABASE" ]] && [[ ! "$MYDATABASE" =~ ^([[:alnum:]]|[_])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYDATABASE\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYDUMPFILE" ]] && [[ ! "$MYDUMPFILE" =~ ^([[:alnum:]]|[ .-_/])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYDUMPFILE\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://tools.ietf.org/html/rfc1123#page-13 (relaxed)
|
||||||
|
[[ -z "$MYHOST" ]] && MYHOST="localhost"
|
||||||
|
[[ -n "$MYHOST" ]] && [[ ! "$MYHOST" =~ ^([[:alnum:]]|[.-])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYHOST\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://tools.ietf.org/html/rfc6056 (relaxed)
|
||||||
|
[[ -z "$MYPORT" ]] && MYPORT=3306
|
||||||
|
[[ -n "$MYPORT" ]] && [[ ! "$MYPORT" =~ ^[1-9]([[:digit:]]){0,4}$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYPORT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYPASSWORD" ]] && [[ ! "$MYPASSWORD" =~ ^([[:alnum:]]|[ !~&#$<>()%+-_.])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYPASSWORD\n$MSG_PERCENT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
#
|
||||||
|
[[ -n "$MYUSER" ]] && [[ ! "$MYUSER" =~ ^([[:alnum:]]|[.-_\\+])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $MYUSER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# We've at least minimally checked the parameters.
|
||||||
|
|
||||||
|
# Need to be root or a Docker manager user if the DB runs in a container.
|
||||||
|
#
|
||||||
|
[[ -n "$MYCONTAINER" ]] && [[ "$USER" != 'root' ]] \
|
||||||
|
&& [[ -z "$(echo "$("$ID" -Gn "$USER") " | "$GREP" ' docker ')" ]] \
|
||||||
|
&& echo "$MSG_DOCKERGRPNEED" >&2 && exit 1 #"
|
||||||
|
|
||||||
|
# If the RDBMS is dockerized the container must be running.
|
||||||
|
#
|
||||||
|
[[ -n "$MYCONTAINER" ]] \
|
||||||
|
&& [[ -z "$("$DOCKER" ps -q -f name=$MYCONTAINER)" ]] \
|
||||||
|
&& echo "$MSG_DOESNOTRUN $MYCONTAINER" >&2 && exit 1
|
||||||
|
|
||||||
|
# Determines the database to restore.
|
||||||
|
#
|
||||||
|
# Lacking -d, the 1st non-option parameter is the database's name.
|
||||||
|
if [ -z "$MYDATABASE" -a -n "$1" ]; then MYDATABASE="$1"; shift; fi
|
||||||
|
# The database's name is mandatory.
|
||||||
|
if [ -z "$MYDATABASE" ]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# A humble sanitization.
|
||||||
|
if [[ ! "$MYDATABASE" =~ ^([[:alnum:]]|[_])*$ ]]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# Silently refuses the MySQL/MariaDB internal databases.
|
||||||
|
for veto in $vetodatabases ""
|
||||||
|
do
|
||||||
|
[[ "$MYDATABASE" = "$veto" ]] && exit 0
|
||||||
|
done
|
||||||
|
# We've a database name to restore.
|
||||||
|
|
||||||
|
# Determines the dumpfile.
|
||||||
|
#
|
||||||
|
# Lacking -f, the 2nd non-option parameter is the dumpfile's pathname.
|
||||||
|
if [ -z "$MYDUMPFILE" -a -n "$1" ]; then MYDUMPFILE="$1"; shift; fi
|
||||||
|
# The dumpfile is mandatory.
|
||||||
|
if [ -z "$MYDUMPFILE" ]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# The MYDUMPFILE must point to a readable file.
|
||||||
|
# If it is an existing symlink dereferences it to ensure, it points to a file.
|
||||||
|
if [ -h "$MYDUMPFILE" ]; then
|
||||||
|
if [[ "$("$READLINK" "$MYDUMPFILE")" != /* ]]
|
||||||
|
# relative path in symlink
|
||||||
|
then MYDUMPFILE="$("$DIRNAME" "$MYDUMPFILE")/$("$READLINK" "$MYDUMPFILE")"
|
||||||
|
# absolute path in symlink
|
||||||
|
else MYDUMPFILE="$("$READLINK" "$MYDUMPFILE")"; fi
|
||||||
|
fi
|
||||||
|
# Let's check it!
|
||||||
|
if [ ! -r "$MYDUMPFILE" -o ! -f "$MYDUMPFILE" ]
|
||||||
|
then echo -e "$MSG_BADDUMP $MYDUMPFILE"; exit 1; fi
|
||||||
|
# We've an existing dumpfile.
|
||||||
|
|
||||||
|
# Tries to get the locale settings (actually CHARACTER SET) of this dump.
|
||||||
|
#
|
||||||
|
if [ -z "$MYCHARSET" ]; then
|
||||||
|
# Let's identify whether the file is gzipped or not.
|
||||||
|
UNPACKER=$("$FILE" --mime-type "$MYDUMPFILE")
|
||||||
|
UNPACKER=${UNPACKER##* } # The last word is the MIME-type.
|
||||||
|
# We'll use gunzip or cat (a dummy unzipper), according to the MIME type.
|
||||||
|
[[ "$UNPACKER" = 'application/gzip' ]] \
|
||||||
|
&& UNPACKER="$GUNZIP" \
|
||||||
|
|| UNPACKER="$CAT"
|
||||||
|
# Collects all character set adjustments from the dumpfile.
|
||||||
|
MYCHARSET=$("$CAT" "$MYDUMPFILE" | "$UNPACKER" | "$GREP" -B2 -i 'CREATE TABLE' | \
|
||||||
|
"$GREP" -i 'character_set_client =' | "$SORT" -u)
|
||||||
|
# Trims the character set's name itself (the first word after the equal sign).
|
||||||
|
[[ -n "$MYCHARSET" ]] && MYCHARSET=$(echo -e "$MYCHARSET" | "$SED" 's/^.*= \(.*\) .*$/\1/') #'
|
||||||
|
fi
|
||||||
|
# We've a raw guess about the character sets used.
|
||||||
|
|
||||||
|
# Finds the LOGFILE to use.
|
||||||
|
#
|
||||||
|
# If the folder containing the MYDUMPFILE is writable, we will use a
|
||||||
|
# logfile with the same name as the dumpfile but with .log extension.
|
||||||
|
[[ -w "$("$DIRNAME" "$MYDUMPFILE")" ]] \
|
||||||
|
&& LOGFILE="${MYDUMPFILE%.gz}" \
|
||||||
|
&& LOGFILE="${LOGFILE%.*}.log" \
|
||||||
|
|| LOGFILE="/dev/null"
|
||||||
|
# We've a suitable logfile.
|
||||||
|
|
||||||
|
# Opens the log and takes care to close it when finish.
|
||||||
|
#
|
||||||
|
echo "$(eval $LOGSTAMP) Starting job #$$ $("$TR" '\0' ' ' < /proc/$$/cmdline)" | \
|
||||||
|
"$TEE" -a "$LOGFILE"
|
||||||
|
# Sets a trap to make always a corresponding exit log entry as well.
|
||||||
|
function close_log() {
|
||||||
|
echo -e "$(eval $LOGSTAMP) Finished job #$$ $("$TR" '\0' ' ' < /proc/$$/cmdline)\n" | \
|
||||||
|
"$TEE" -a "$LOGFILE"
|
||||||
|
}
|
||||||
|
trap -- 'close_log' EXIT
|
||||||
|
# We started logging.
|
||||||
|
|
||||||
|
# Prepopulates the SQL command skeleton (macro).
|
||||||
|
#
|
||||||
|
# This skeleton makes the SQL calls independent to the environment
|
||||||
|
# (native or dockerized) and credentials. We need only actualize the
|
||||||
|
# CONNECT, DATABASE and SQLVERB clauses then eval $DO_SQLVERB.
|
||||||
|
# Warning: the parameters must had been sanitized!
|
||||||
|
DO_SQLVERB=""
|
||||||
|
DO_SQLVERB+="export MYSQL_PWD; "
|
||||||
|
DO_SQLVERB+="\"\$MYSQL\" \$CONNECT -N \$DATABASE "
|
||||||
|
DO_SQLVERB+="-e \"\$SQLVERB\""
|
||||||
|
# We've a suitable SQL macro.
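# Usage sketch (illustrative values): with CONNECT="-u root -h localhost -P 3306",
# DATABASE="mysql" and SQLVERB="SELECT 1;", eval "$DO_SQLVERB" runs roughly:
#   export MYSQL_PWD; "$MYSQL" -u root -h localhost -P 3306 -N mysql -e "SELECT 1;"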
|
||||||
|
|
||||||
|
# Do we connect the database as a DBA?
|
||||||
|
#
|
||||||
|
SQLVERB="SELECT 1;"
|
||||||
|
# Sets the default DBA username for dockerized and native RDBMS as well.
|
||||||
|
if [ -z "$MYDBAUSER" ]; then
|
||||||
|
[[ -n "$MYCONTAINER" ]] \
|
||||||
|
&& MYDBAUSER="root" \
|
||||||
|
|| MYDBAUSER="$USER"
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
# We'll try the local connection (Unix-domain socket) first.
|
||||||
|
CONNECT=""
|
||||||
|
DATABASE=""
|
||||||
|
result=$(eval "$DO_SQLVERB" 2>/dev/null); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [ "$result" != "1" ]; then
|
||||||
|
#
|
||||||
|
# On failure we'll try the TCP connection.
|
||||||
|
MYSQL_PWD="$MYDBAPASSWORD"
|
||||||
|
CONNECT="-u $MYDBAUSER -h $MYHOST -P $MYPORT"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2>/dev/null); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [ "$result" != "1" ]; then
|
||||||
|
#
|
||||||
|
# On failure we'll try the TCP connection with non-DBA credentials.
|
||||||
|
MYSQL_PWD="$MYPASSWORD"
|
||||||
|
CONNECT="-u $MYUSER -h $MYHOST -P $MYPORT"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2>/dev/null); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [ "$result" != "1" ]; then
|
||||||
|
#
|
||||||
|
# On failure we'll give up here.
|
||||||
|
[[ "$result" != "1" ]] \
|
||||||
|
&& echo -e "$MSG_FAILCONN" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've valid MYSQL_PWD and CONNECT clauses.
|
||||||
|
|
||||||
|
# Checks the superuser privilege.
|
||||||
|
# Better check: TODO!
|
||||||
|
ISDBA=false
|
||||||
|
DATABASE=""
|
||||||
|
SQLVERB="SHOW GRANTS;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [[ $excode -eq 0 && \
|
||||||
|
"$result" =~ ^GRANTALLPRIVILEGESON\*\.\*.*WITHGRANTOPTION$ ]]; then
|
||||||
|
ISDBA=true
|
||||||
|
else
|
||||||
|
echo -e "$MSG_NONSUPER" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
fi
|
||||||
|
# We know whether we're a DB superuser or not.
|
||||||
|
|
||||||
|
# The following steps need superuser privileges.
|
||||||
|
# Lacking these, we'll skip them.
|
||||||
|
if $ISDBA; then
|
||||||
|
DATABASE="mysql"
|
||||||
|
|
||||||
|
# Checks the minimal MySQL/MariaDB version.
|
||||||
|
#
|
||||||
|
SQLVERB="SELECT version();"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
[[ -z "$result" ]] \
|
||||||
|
&& echo -e "$MSG_FAILVER" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# Let's extract a comparable RDBMS version from the result.
|
||||||
|
dbversion=${result%%-*}                               # strips anything after the 1st dash (inclusive)
|
||||||
|
dbversion=(${dbversion//./ }) # converts to an array
|
||||||
|
dbversion=$("$PRINTF" '%03d%03d%03d' ${dbversion[@]}) # 3 times 3 digits 0-padded
|
||||||
|
if [ -n "$(echo "$result" | "$GREP" -i "mariadb")" ]; then
|
||||||
|
# MariaDB version check.
|
||||||
|
# (10# forces base 10; the zero-padded strings would otherwise be read as octal.)
(( 10#$dbversion < 10#$MARIADBMIN )) \
|
||||||
|
&& echo -e "$MSG_OLDRDBMS: $result" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
else
|
||||||
|
# MySQL version check.
|
||||||
|
(( 10#$dbversion < 10#$MYSQLMIN )) \
|
||||||
|
&& echo -e "$MSG_OLDRDBMS: $result" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
fi
|
||||||
|
# RDBMS version is proper.
|
||||||
|
|
||||||
|
# Creates the database user (owner) if it doesn't exist.
|
||||||
|
#
|
||||||
|
echo -e "CREATE USER" | "$TEE" -a "$LOGFILE"
|
||||||
|
SQLVERB=" CREATE USER '$MYUSER'@'$MYHOST'; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [[ $excode -ne 0 ]]; then
|
||||||
|
# Already exists (or something went wrong).
|
||||||
|
echo -e "$MSG_FAILUSER $MYUSER@$MYHOST" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
else
|
||||||
|
# Sets the password only if the user has just been created.
|
||||||
|
echo -e "SET PASSWORD" | "$TEE" -a "$LOGFILE"
|
||||||
|
SQLVERB="SET PASSWORD FOR '$MYUSER'@'$MYHOST' = '$MYPASSWORD'; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_FAILPASS $MYUSER@$MYHOST" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
# Grants all privileges on the database to the user.
|
||||||
|
#
|
||||||
|
echo -e "GRANT" | "$TEE" -a "$LOGFILE"
|
||||||
|
SQLVERB="GRANT ALL PRIVILEGES ON $MYDATABASE.* TO '$MYUSER'@'$MYHOST'; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_FAILGRANT $MYUSER@$MYHOST" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# We've the database user with the proper password.
|
||||||
|
|
||||||
|
# Drops all existing connections to the database being restored.
|
||||||
|
#
|
||||||
|
echo -e "KILL CONNECTIONS" | "$TEE" -a "$LOGFILE"
|
||||||
|
# List of the active connections.
|
||||||
|
SQLVERB="SELECT id FROM information_schema.processlist "
|
||||||
|
SQLVERB+="WHERE db = '$MYDATABASE';"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
if [[ $excode -ne 0 ]]; then
|
||||||
|
echo -e "$MSG_FAILTOKILL" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
else
|
||||||
|
# Enumerates and tries to kill these connections.
|
||||||
|
for connection in $result ""
|
||||||
|
do
|
||||||
|
if [ -n "$connection" ]; then
|
||||||
|
SQLVERB="KILL $connection;"
|
||||||
|
eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2); excode=$?
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_FAILKILL $connection" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
# Connections have been eliminated (we hope).
|
||||||
|
fi
|
||||||
|
# Done with the superuser part.
|
||||||
|
|
||||||
|
# Drops the database.
|
||||||
|
#
|
||||||
|
echo -e "DROP DATABASE" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE=""
|
||||||
|
SQLVERB="DROP DATABASE IF EXISTS $MYDATABASE;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
|
||||||
|
# Recreates the database.
|
||||||
|
#
|
||||||
|
echo -e "CREATE DATABASE" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE=""
|
||||||
|
SQLVERB="CREATE DATABASE $MYDATABASE;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# We've an empty database.
|
||||||
|
|
||||||
|
# Sets the default character set.
|
||||||
|
#
|
||||||
|
if [ -n "$MYCHARSET" ]; then
|
||||||
|
echo -e "ALTER CHARACTER SET" | "$TEE" -a "$LOGFILE"
|
||||||
|
# If it is ambiguous, we'll ignore it.
|
||||||
|
if [ "$(echo -e "$MYCHARSET" | "$WC" -l)" -ne 1 ]; then
|
||||||
|
echo -e "$MSG_AMBIGOUS" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
MYCHARSET=""
|
||||||
|
else
|
||||||
|
# Let's set it.
|
||||||
|
DATABASE="$MYDATABASE"
|
||||||
|
SQLVERB="ALTER DATABASE $MYDATABASE CHARACTER SET $MYCHARSET;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've the character set adjusted.
|
||||||
|
|
||||||
|
# Restores the database from the dump.
|
||||||
|
#
|
||||||
|
# This isn't as straightforward as in PostgreSQL.
|
||||||
|
# We'll use the database user's credentials, not the superuser's
|
||||||
|
# to mitigate the effect of an unsanitized dump.
|
||||||
|
echo -e "RESTORE" | "$TEE" -a "$LOGFILE"
|
||||||
|
# Let's identify whether the file is gzipped or not.
|
||||||
|
UNPACKER=$("$FILE" --mime-type "$MYDUMPFILE")
|
||||||
|
UNPACKER=${UNPACKER##* } # The last word is the MIME-type.
|
||||||
|
# We'll use gunzip or cat (a dummy unzipper), according to the MIME type.
|
||||||
|
[[ "$UNPACKER" = 'application/gzip' ]] \
|
||||||
|
&& UNPACKER="$GUNZIP" \
|
||||||
|
|| UNPACKER="$CAT"
|
||||||
|
# This is a sed expression to modify the security definers within the dump.
|
||||||
|
MOD_DEFINER="s/DEFINER=.*@[^ ]*/DEFINER=\`$MYUSER\`@\`$MYHOST\`/"
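# Illustrative example (assuming MYUSER=webshop and MYHOST=localhost): a dumped
#   DEFINER=`root`@`%` SQL SECURITY DEFINER
# clause becomes
#   DEFINER=`webshop`@`localhost` SQL SECURITY DEFINER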
|
||||||
|
# Considers the RDBMS environment.
|
||||||
|
if [ -n "$MYCONTAINER" ]; then
|
||||||
|
# Dockerized RDBMS.
|
||||||
|
echo "MySQL dockerized - TODO!" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
else
|
||||||
|
# Native RDBMS.
|
||||||
|
# Reads the dump, unpacks it on the fly and modifies the security definer,
|
||||||
|
# then passes the data stream to the MySQL client.
|
||||||
|
"$CAT" "$MYDUMPFILE" | "$UNPACKER" | "$SED" "$MOD_DEFINER" | \
|
||||||
|
"$MYSQL" -u "$MYUSER" -p$MYPASSWORD -h "$MYHOST" -P "$MYPORT" \
|
||||||
|
-f -D "$MYDATABASE" \
|
||||||
|
>/dev/null 2> >("$TEE" -a "$LOGFILE" >&2); excode=$?
|
||||||
|
# Unfortunately the result code doesn't differentiate the
|
||||||
|
# blocking and non-blocking states.
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONZERO: $excode" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
fi
|
||||||
|
# We made an attempt to restore the database - the result isn't well defined.
|
||||||
|
|
||||||
|
# Closing log entry will be handled via EXIT trap.
|
||||||
|
#
|
||||||
|
# That's all, Folks! :)
|
396
.templates/bin/psql_dumpdb
Executable file
@ -0,0 +1,396 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Dumps a PostgreSQL database from a native or dockerized PostgreSQL instance
|
||||||
|
# running on this box. This is a wrapper script around the pg_dump command.
|
||||||
|
# Uses only the TCP connection, therefore you must enable this in the pg_hba file.
|
||||||
|
#
|
||||||
|
# If the PostgreSQL is dockerized, you need to call this script as a Docker manager user
|
||||||
|
# (member of the docker Linux group).
|
||||||
|
#
|
||||||
|
# Accepts a few pg_dump options as well as the optional database password
|
||||||
|
# and the optional output pathname:
|
||||||
|
#
|
||||||
|
# $0 [-U dbuser] [-P dbpass] [-h dbhost] [-p dbport]
|
||||||
|
# [-C container] [-d database] [-f dumpfile ] [-F dumpformat ]
|
||||||
|
# [--acl ] [--force]
|
||||||
|
# [database (if not in -d)] [dumpfile (if not in -f)]
|
||||||
|
#
|
||||||
|
# A special -F9 option makes a PostgreSQL 9.x compatible plain text dump.
|
||||||
|
#
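# Example invocation (illustrative only - the names and password below are
# placeholders, not defaults):
#   $0 -C postgres -d webshop -U webshop -P 'S3cretPw' -F c /backup/webshop.dmp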
|
||||||
|
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
|
||||||
|
# Kovács Zoltán <kovacsz@marcusconsulting.hu>
|
||||||
|
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||||
|
# 2023-06-18 v1.0
|
||||||
|
# new: forked from the "SMARTERP_skeleton" repository.
|
||||||
|
# 2022-11-03 v0.4
|
||||||
|
# mod: May use the peer authentication as a secondary preference.
|
||||||
|
# fix: pg_restore output overwrote the previous log lines; this has been fixed.
|
||||||
|
# 2022-01-23 v0.3
|
||||||
|
# mod: More sophisticated estimate made on storage space needed.
|
||||||
|
# 2021-03-19 v0.2
|
||||||
|
# new: Option --acl (include ACLs as well) has been added.
|
||||||
|
# new: Option -F9 (PSQL 9.x compatible plain dump) has been added (native only).
|
||||||
|
# mod: Plain text dumps are compressed now.
|
||||||
|
# mod: Typos and comments.
|
||||||
|
# fix: The sbin directories have been appended to the $PATH (Debian doesn't add them).
|
||||||
|
# 2020-09-17 v0.1 Initial release
|
||||||
|
|
||||||
|
# Accepted environment variables and their defaults.
|
||||||
|
#
|
||||||
|
PGCONTAINER=${PGCONTAINER-""} # Docker container's name
|
||||||
|
PGDATABASE=${PGDATABASE-""} # Database name to dump
|
||||||
|
PGDUMPFILE=${PGDUMPFILE-""}          # Dump file pathname
|
||||||
|
PGDUMPFORMAT=${PGDUMPFORMAT-"c"} # Dump file format
|
||||||
|
PGHOST=${PGHOST:-"localhost"} # Connection parameter
|
||||||
|
PGOPTIONS=${PGOPTIONS-""} # Options to pass to pg_dump
|
||||||
|
PGPASSWORD=${PGPASSWORD-""} # Credential for the DB user
|
||||||
|
PGPORT=${PGPORT:-"5432"} # Connection parameter
|
||||||
|
PGUSER=${PGUSER:-"postgres"} # DB user for this dump
|
||||||
|
|
||||||
|
### Temporarily ignored! Needs sanitizing.
|
||||||
|
PGOPTIONS=""
|
||||||
|
|
||||||
|
# Other initialisations.
|
||||||
|
#
|
||||||
|
PGDUMPACLS="--no-acl --no-owner" # Excludes the ACLs and grants.
|
||||||
|
PGDUMPFORCED="" # Dumps despite failed checks
|
||||||
|
vetodatabases="postgres template0 template1" # Technical DBs aren't to dump
|
||||||
|
|
||||||
|
# Messages.
|
||||||
|
#
|
||||||
|
MSG_ABORTED="aborted"
|
||||||
|
MSG_BADCRED="Bad credentials for PostgreSQL"
|
||||||
|
MSG_BADDUMPPATH="Dumpfile's directory isn't writable"
|
||||||
|
MSG_BADOPT="Invalid option"
|
||||||
|
MSG_DOESNOTRUN="The database container isn't running:"
|
||||||
|
MSG_DOCKERGRPNEED="You must be a member of the docker group."
|
||||||
|
MSG_FAILBKP="Archiver exited with error code"
|
||||||
|
MSG_FAILDB="Unable to dump the database"
|
||||||
|
MSG_FAILCONN="Failed to connect to the database"
|
||||||
|
MSG_FAILSIZE="Failed to size the database"
|
||||||
|
MSG_FORCED="but forced to continue"
|
||||||
|
MSG_MISSINGDEP="Fatal: missing dependency"
|
||||||
|
MSG_NOCOMP="Fatal: missing component"
|
||||||
|
MSG_NODIRDUMP="Directory format isn't implemented with a dockerized database."
|
||||||
|
MSG_NOSPACE="Not enough space to dump the database"
|
||||||
|
MSG_PEERAUTH="Peer authentication has been used."
|
||||||
|
|
||||||
|
MSG_USAGE="Usage: $0 [options] [database [dump_pathname|-]]\n"
|
||||||
|
MSG_USAGE+="Option:\tENVVAR:\n"
|
||||||
|
MSG_USAGE+=" -C\tPGCONTAINER\tPostgres Docker container's name\n"
|
||||||
|
MSG_USAGE+=" -d\tPGDATABASE\tPostgres database to dump ($USER)\n"
|
||||||
|
MSG_USAGE+=" -f\tPGDUMP\t\tDumpfile pathname\n"
|
||||||
|
MSG_USAGE+=" -F\tPGDUMPFORMAT\tDumpfile format ($PGDUMPFORMAT)\n"
|
||||||
|
MSG_USAGE+=" -h\tPGHOST\t\tHostname or IP to connect (localhost)\n"
|
||||||
|
MSG_USAGE+=" -p\tPGPORT\t\tTCP port to connect (5432)\n"
|
||||||
|
MSG_USAGE+=" -P\tPGPASSWORD\tPostgres password\n"
|
||||||
|
MSG_USAGE+=" -U\tPGUSER\t\tPostgres username ($PGUSER)\n"
|
||||||
|
MSG_USAGE+="--acl\t\t\tIncludes the grants and ACLs as well\n"
|
||||||
|
MSG_USAGE+="--force\t\t\tForces the operation despite the failed checks\n"
|
||||||
|
|
||||||
|
# Basic environment settings.
|
||||||
|
LANG=C
|
||||||
|
LC_ALL=C
|
||||||
|
# We also need the sbin directories.
|
||||||
|
if ! [[ "$PATH" =~ '/sbin:' ]]; then
|
||||||
|
PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi
|
||||||
|
|
||||||
|
# Getting options.
|
||||||
|
#
|
||||||
|
while getopts ":-:C:d:D:f:F:h:H:p:P:u:U:" option
|
||||||
|
do
|
||||||
|
case ${option} in
|
||||||
|
"-" )
|
||||||
|
if [ "$OPTARG" = "acl" ]; then PGDUMPACLS=""
|
||||||
|
elif [ "$OPTARG" = "force" ]; then PGDUMPFORCED="yes"
|
||||||
|
elif [ "$OPTARG" = "help" ]; then echo -e "$MSG_USAGE" >&2; exit
|
||||||
|
else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
"C" ) PGCONTAINER="$OPTARG" ;;
|
||||||
|
"d" | "D" ) PGDATABASE="$OPTARG" ;;
|
||||||
|
"f" ) PGDUMPFILE="$OPTARG" ;;
|
||||||
|
"F" ) PGDUMPFORMAT="$OPTARG" ;;
|
||||||
|
"h" | "H" ) PGHOST="$OPTARG" ;;
|
||||||
|
"P" ) PGPASSWORD="$OPTARG" ;;
|
||||||
|
"p" ) PGPORT="$OPTARG" ;;
|
||||||
|
"u" | "U" ) PGUSER="$OPTARG" ;;
|
||||||
|
\? )
|
||||||
|
echo "$MSG_BADOPT -$OPTARG" >&2; exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done; shift $((OPTIND -1))
|
||||||
|
# All options have been processed.
|
||||||
|
|
||||||
|
# Checks the dependencies.
|
||||||
|
#
|
||||||
|
# Conditional dependencies (according to native or dockerized environment).
|
||||||
|
[[ -z "$PGCONTAINER" ]] \
|
||||||
|
&& additem="psql pg_dump" \
|
||||||
|
|| additem="docker"
|
||||||
|
# Common dependencies.
|
||||||
|
TR=$(which tr 2>/dev/null)
|
||||||
|
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
|
||||||
|
for item in basename df date dirname egrep grep gzip hostname id \
|
||||||
|
pwd sed tail tee $additem
|
||||||
|
do
|
||||||
|
if [ -n "$(which $item)" ]
|
||||||
|
then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
|
||||||
|
else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
|
||||||
|
done
|
||||||
|
# All dependencies are available via "$THECOMMAND" (upper case) call.
|
||||||
|
#
|
||||||
|
# An additional bugfix (use "$(which gzip)" instead of "$GZIP"):
|
||||||
|
# https://www.gnu.org/software/gzip/manual/html_node/Environment.html
|
||||||
|
GZIP=""
|
||||||
|
|
||||||
|
# Need to be root or a Docker manager user if the DB runs in a container.
|
||||||
|
#
|
||||||
|
[[ -n "$PGCONTAINER" ]] && [[ "$USER" != 'root' ]] \
|
||||||
|
&& [[ -z "$(echo "$("$ID" -Gn "$USER") " | "$GREP" ' docker ')" ]] \
|
||||||
|
&& echo "$MSG_DOCKERGRPNEED" >&2 && exit 1 #"
|
||||||
|
|
||||||
|
# If the PostgreSQL is dockerized the container must be running.
|
||||||
|
#
|
||||||
|
[[ -n "$PGCONTAINER" ]] \
|
||||||
|
&& [[ -z "$("$DOCKER" ps -q -f name=$PGCONTAINER)" ]] \
|
||||||
|
&& echo "$MSG_DOESNOTRUN $PGCONTAINER" >&2 && exit 1
|
||||||
|
|
||||||
|
# Determines the database to dump.
|
||||||
|
#
|
||||||
|
# Lacking -d, the 1st non-option parameter is the database's name.
|
||||||
|
if [ -z "$PGDATABASE" -a -n "$1" ]; then PGDATABASE="$1"; shift; fi
|
||||||
|
# The last resort is the Linux user's name.
|
||||||
|
if [ -z "$PGDATABASE" ]; then PGDATABASE="$USER"; fi
|
||||||
|
# A humble sanitization.
|
||||||
|
if [[ ! "$PGDATABASE" =~ ^([[:alnum:]]|[_])*$ ]]; then
|
||||||
|
echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# Silently refuses the PostgreSQL internal databases.
|
||||||
|
for veto in $vetodatabases ""
|
||||||
|
do
|
||||||
|
[[ "$PGDATABASE" = "$veto" ]] && exit 0
|
||||||
|
done
|
||||||
|
# We've a database name to dump.
|
||||||
|
|
||||||
|
# Determines the output file (or maybe a target directory) and the logfile.
|
||||||
|
#
|
||||||
|
# A generated file or directory name may be necessary.
|
||||||
|
DUMPNAME="$PGDATABASE.$("$DATE" '+%Y%m%d_%H%M%S').$("$HOSTNAME")"
|
||||||
|
# Lacking -f, the next non-option parameter is the dumpfile's pathname.
|
||||||
|
if [ -z "$PGDUMPFILE" -a -n "$1" ]; then PGDUMPFILE="$1"; shift; fi
|
||||||
|
# The last resort is the generated pathname.
|
||||||
|
if [ -z "$PGDUMPFILE" ]; then PGDUMPFILE="$("$PWD")/$DUMPNAME"; fi
|
||||||
|
#
|
||||||
|
# Let's make some checks.
|
||||||
|
#
|
||||||
|
# Dumping to STDOUT is invalid with the directory format; we'll dump
|
||||||
|
# into a newly created directory instead.
|
||||||
|
[[ "$PGDUMPFILE" = "-" ]] && [[ "$PGDUMPFORMAT" = "d" ]] \
|
||||||
|
&& PGDUMPFILE="$("$PWD")/$DUMPNAME"
|
||||||
|
# If the given pathname is an existing directory, we need to append
|
||||||
|
# a generated target filename (or directory name for Fd format).
|
||||||
|
[[ -d "$PGDUMPFILE" ]] && PGDUMPFILE+="/$DUMPNAME"
|
||||||
|
#
|
||||||
|
# Here we go with the desired pathname.
|
||||||
|
#
|
||||||
|
if [ "$PGDUMPFILE" = "-" ]; then
|
||||||
|
# If '-' was given as the PGDUMPFILE, we'll write to STDOUT w/o logs.
|
||||||
|
PGDUMPFILE=""
|
||||||
|
logfile="/dev/null"
|
||||||
|
else
|
||||||
|
# We'll write and log to a directory within the filesystem.
|
||||||
|
PGDUMPDIR="$("$DIRNAME" "$PGDUMPFILE")"
|
||||||
|
# This directory must exist and be writable.
|
||||||
|
if [ -n "$PGDUMPDIR" ] && [ ! -d "$PGDUMPDIR" -o ! -x "$PGDUMPDIR" ]; then
|
||||||
|
echo "$MSG_BADDUMPPATH: $PGDUMPDIR" >&2; exit 1
|
||||||
|
fi
|
||||||
|
# Extends the output files properly.
|
||||||
|
if [ "$PGDUMPFORMAT" = "d" ]; then
|
||||||
|
logfile="$PGDUMPFILE.log"
|
||||||
|
elif [ "$PGDUMPFORMAT" = "9" -o "$PGDUMPFORMAT" = "p" ]; then
|
||||||
|
PGDUMPFILE="${PGDUMPFILE%.sql}.sql"
|
||||||
|
logfile="${PGDUMPFILE%.sql}.log"
|
||||||
|
elif [ "$PGDUMPFORMAT" = "t" ]; then
|
||||||
|
PGDUMPFILE="${PGDUMPFILE%.tar}.tar"
|
||||||
|
logfile="${PGDUMPFILE%.tar}.log"
|
||||||
|
else
|
||||||
|
PGDUMPFILE="${PGDUMPFILE%.dmp}.dmp"
|
||||||
|
logfile="${PGDUMPFILE%.dmp}.log"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've a suitable output and log pathname (or we've to use the STDOUT w/o logs).
|
||||||
|
|
||||||
|
# Do we connect the database?
|
||||||
|
#
|
||||||
|
if [ -n "$PGCONTAINER" ]; then
|
||||||
|
# Dockerized database.
|
||||||
|
result=$("$DOCKER" exec $PGCONTAINER \
|
||||||
|
sh -c "export PGPASSWORD=\"$PGPASSWORD\"; \
|
||||||
|
psql -U \"$PGUSER\" -w -h \"$PGHOST\" -p \"$PGPORT\" \
|
||||||
|
-d \"$PGDATABASE\" -t -c \"SELECT 1\"" 2>/dev/null)
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $result -ne 1 ]] && echo -e "$MSG_FAILCONN." | "$TEE" -a "$logfile" >&2 && exit 1
|
||||||
|
else
|
||||||
|
# Self-hosted database.
|
||||||
|
# Preferred method: TCP with username and password.
|
||||||
|
CONNECT="-U $PGUSER -w -h $PGHOST -p $PGPORT"
|
||||||
|
export PGPASSWORD
|
||||||
|
"$PSQL" $CONNECT -d "$PGDATABASE" -c "SELECT 1" >/dev/null 2>&1; result=$?
|
||||||
|
if [[ $result -ne 0 ]]; then
|
||||||
|
# On failure we will try the peer authentication.
|
||||||
|
CONNECT=""
|
||||||
|
"$PSQL" $CONNECT -d "$PGDATABASE" -c "SELECT 1" >/dev/null 2>&1; result=$?
|
||||||
|
[[ $result -ne 0 ]] && echo -e "$MSG_FAILCONN." | "$TEE" -a "$logfile" >&2 && exit $result
|
||||||
|
# Leaves a warning about using the peer authentication.
|
||||||
|
echo -e "$MSG_PEERAUTH" >>"$logfile"
|
||||||
|
fi
|
||||||
|
# We've a valid CONNECT clause.
|
||||||
|
fi
|
||||||
|
# We've the database connect checked.
|
||||||
|
|
||||||
|
# Do we size the database?
|
||||||
|
#
|
||||||
|
dbsize=0
|
||||||
|
# It isn't relevant when we dump to STDOUT.
|
||||||
|
if [ -n "$PGDUMPFILE" ]; then
|
||||||
|
# Queries the size of the database (in bytes; converted to KB below).
|
||||||
|
if [ -n "$PGCONTAINER" ]; then
|
||||||
|
# Dockerized database.
|
||||||
|
dbsize=$("$DOCKER" exec $PGCONTAINER \
|
||||||
|
sh -c "export PGPASSWORD=\"$PGPASSWORD\"; \
|
||||||
|
psql -U \"$PGUSER\" -w -h \"$PGHOST\" -p \"$PGPORT\" -d \"$PGDATABASE\" \
|
||||||
|
-t -c \"SELECT pg_database_size('$PGDATABASE');\"" 2>/dev/null)
|
||||||
|
else
|
||||||
|
# Self-hosted database.
|
||||||
|
export PGPASSWORD
|
||||||
|
dbsize=$("$PSQL" $CONNECT -d "$PGDATABASE" \
|
||||||
|
-t -c "SELECT pg_database_size('$PGDATABASE');" 2>/dev/null)
|
||||||
|
fi
|
||||||
|
# Some sanitization
|
||||||
|
dbsize="${dbsize//[[:space:]]/}"
|
||||||
|
[[ -z "$dbsize" ]] && dbsize=0
|
||||||
|
[[ ! "$dbsize" =~ ^([[:digit:]])*$ ]] && dbsize=0
|
||||||
|
# KB units
|
||||||
|
dbsize=$(( dbsize /1024 ))
|
||||||
|
# On failure aborts here, unless it was forced.
|
||||||
|
if [ $dbsize -eq 0 ]; then echo -en "$MSG_FAILSIZE" | "$TEE" -a "$logfile"
|
||||||
|
if [ "$PGDUMPFORCED" ]; then
|
||||||
|
echo " - $MSG_FORCED" | "$TEE" -a "$logfile"
|
||||||
|
else
|
||||||
|
echo " - $MSG_ABORTED" | "$TEE" -a "$logfile"; exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've the database size.
|
||||||
|
|
||||||
|
# Checks the storage space available.
|
||||||
|
#
|
||||||
|
# It isn't relevant when we dump to STDOUT or the database has no size.
|
||||||
|
if [ -n "$PGDUMPFILE" -a "$dbsize" -gt 0 ]; then
|
||||||
|
# Let's estimate the dump size.
|
||||||
|
dumpsize=$(( dbsize / 10 * 8 ))
|
||||||
|
# We'll estimate a 1:4 ratio for on-the-fly compression.
|
||||||
|
[[ "$PGDUMPFORMAT" = "c" ]] && dumpsize=$(( dumpsize / 4 ))
|
||||||
|
# We'll estimate a 5:4 ratio for a native dump followed by compression.
|
||||||
|
[[ "$PGDUMPFORMAT" = "9" ]] && dumpsize=$(( dumpsize / 4 * 5 ))
|
||||||
|
# Let's calculate the available space (KB units).
|
||||||
|
freespace=$("$DF" --output=avail -k "$("$DIRNAME" "$PGDUMPFILE")" | $TAIL -n1) #"
|
||||||
|
# Is it enough?
|
||||||
|
if [ $freespace -lt $dumpsize ]; then
|
||||||
|
echo -en "$MSG_NOSPACE" | "$TEE" -a "$logfile"
|
||||||
|
# On failure aborts here, unless it was forced.
|
||||||
|
if [ "$PGDUMPFORCED" ]; then
|
||||||
|
echo " - $MSG_FORCED" | "$TEE" -a "$logfile"
|
||||||
|
else
|
||||||
|
echo " - $MSG_ABORTED" | "$TEE" -a "$logfile"; exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've the space checked.
|
||||||
|
|
||||||
|
# Let's dump!
|
||||||
|
# Writes the database as requested, into the dump file or to STDOUT.
|
||||||
|
#
|
||||||
|
if [ -n "$PGCONTAINER" ]; then
|
||||||
|
# Dockerized database.
|
||||||
|
if [ -z "$PGDUMPFILE" ]; then
|
||||||
|
# STDOUT
|
||||||
|
"$DOCKER" exec $PGCONTAINER \
|
||||||
|
sh -c "export PGPASSWORD=\"$PGPASSWORD\"; \
|
||||||
|
pg_dump -U \"$PGUSER\" -w -h \"$PGHOST\" -p \"$PGPORT\" \
|
||||||
|
-F$PGDUMPFORMAT $PGDUMPACLS -d \"$PGDATABASE\""
|
||||||
|
elif [ "$PGDUMPFORMAT" = "d" ]; then
|
||||||
|
# Directory format isn't implemented with a dockerized database.
|
||||||
|
echo "$MSG_NODIRDUMP" | "$TEE" -a "$logfile"; exit 1
|
||||||
|
else
|
||||||
|
# File
|
||||||
|
"$DOCKER" exec $PGCONTAINER \
|
||||||
|
sh -c "export PGPASSWORD=\"$PGPASSWORD\"; \
|
||||||
|
pg_dump -U \"$PGUSER\" -w -h \"$PGHOST\" -p \"$PGPORT\" \
|
||||||
|
-F$PGDUMPFORMAT $PGDUMPACLS -d \"$PGDATABASE\"" \
|
||||||
|
>"$PGDUMPFILE" 2>>"$logfile"
|
||||||
|
# If it is a plain dump, compresses it.
|
||||||
|
if [ "${PGDUMPFILE##*.}" = "sql" ]; then
|
||||||
|
"$(which gzip)" "$PGDUMPFILE"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# This is a fake result - TODO!
|
||||||
|
result=0
|
||||||
|
else
|
||||||
|
# Self-hosted database.
|
||||||
|
export PGPASSWORD
|
||||||
|
if [ "$PGDUMPFORMAT" = "9" ]; then
|
||||||
|
# Backward-compatible SQL dump.
|
||||||
|
if [ -z "$PGDUMPFILE" ]; then
|
||||||
|
# STDOUT
|
||||||
|
# 1st the schema with some arbitrary conversions.
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
$PGDUMPACLS --schema-only -d "$PGDATABASE" | \
|
||||||
|
"$EGREP" -iv '^SET idle_in_transaction_session_timeout =' | \
|
||||||
|
"$EGREP" -iv '^SET default_table_access_method =' | \
|
||||||
|
"$SED" 's/FUNCTION =/PROCEDURE = /' | \
|
||||||
|
"$SED" "s/CURRENT_DATE/\('now'::text\)::date/g"
|
||||||
|
# 2nd the data as COPY statements.
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
$PGDUMPACLS --data-only -d "$PGDATABASE"
|
||||||
|
else
|
||||||
|
# File
|
||||||
|
# 1st the schema with some arbitrary conversions.
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
$PGDUMPACLS --schema-only -d "$PGDATABASE" | \
|
||||||
|
"$EGREP" -iv '^SET idle_in_transaction_session_timeout =' | \
|
||||||
|
"$EGREP" -iv '^SET default_table_access_method =' | \
|
||||||
|
"$SED" 's/FUNCTION =/PROCEDURE = /' | \
|
||||||
|
"$SED" "s/CURRENT_DATE/\('now'::text\)::date/g" \
|
||||||
|
>"$PGDUMPFILE" 2>>"$logfile"; result=$?
|
||||||
|
# 2nd the data as COPY statements.
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
$PGDUMPACLS --data-only -d "$PGDATABASE" \
|
||||||
|
>>"$PGDUMPFILE" 2>>"$logfile"; result=$?
|
||||||
|
# Finally compresses it.
|
||||||
|
"$(which gzip)" "$PGDUMPFILE"
|
||||||
|
fi
|
||||||
|
elif [ -z "$PGDUMPFILE" ]; then
|
||||||
|
# STDOUT
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
-F"$PGDUMPFORMAT" $PGDUMPACLS -d "$PGDATABASE" \
|
||||||
|
2>>"$logfile"; result=$?
|
||||||
|
elif [ "$PGDUMPFORMAT" = "d" ]; then
|
||||||
|
# Directory
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
-F"$PGDUMPFORMAT" $PGDUMPACLS -d "$PGDATABASE" \
|
||||||
|
-f "$PGDUMPFILE" 2>>"$logfile"; result=$?
|
||||||
|
else
|
||||||
|
# File
|
||||||
|
"$PG_DUMP" $CONNECT \
|
||||||
|
-F"$PGDUMPFORMAT" $PGDUMPACLS -d "$PGDATABASE" \
|
||||||
|
>"$PGDUMPFILE" 2>>"$logfile"; result=$?
|
||||||
|
# If it is a plain dump, compresses it.
|
||||||
|
if [ "${PGDUMPFILE##*.}" = "sql" ]; then
|
||||||
|
"$(which gzip)" "$PGDUMPFILE"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit $result
|
||||||
|
# That's all, Folks! :)
|
701
.templates/bin/psql_restoredb
Executable file
@ -0,0 +1,701 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Restores a PostgreSQL database from a native or dockerized RDBMS instance
|
||||||
|
# accessible from this box. Also creates an owner user for this database
|
||||||
|
# (if it doesn't exist) and grants the appropriate privileges.
|
||||||
|
#
|
||||||
|
# Mainly, this is a pretty fat wrapper script around the pg_restore command.
|
||||||
|
#
|
||||||
|
# Needs PostgreSQL v9.5 (or later).
|
||||||
|
# To restore a database with the necessary user management and grants,
|
||||||
|
# needs the superuser privileges on RDBMS.
|
||||||
|
# * If the RDBMS runs dockerized, you need to call this script as a Docker manager
|
||||||
|
# user (member of the docker Linux group).
|
||||||
|
# * If we're using a native PostgreSQL, you need to call this script as a Linux
|
||||||
|
# user to whom the superuser role has already been granted within PostgreSQL,
|
||||||
|
# or you need to provide the superuser credentials as well. You must enable
|
||||||
|
# the desired connection in the pg_hba file.
|
||||||
|
#
|
||||||
|
# Accepts a few pg_restore options as well as the optional database password
|
||||||
|
# and database admin credentials, with the optional dump pathname:
|
||||||
|
#
|
||||||
|
# $0 [-U dbuser] [-P dbpass] [-h dbhost] [-p dbport]
|
||||||
|
# [-A dbadminuser] [-a dbadminpass] [-r dbreadonlyrole] [-R]
|
||||||
|
# [-C container] [-d database] [-f dumpfile ] [-F dumpformat]
|
||||||
|
# [database (if not in -d)] [dumpfile (if not in -f)]
|
||||||
|
#
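# Example invocation (illustrative only - the names below are placeholders):
#   $0 -C postgres -d webshop -U webshop -R /backup/webshop.dmp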
|
||||||
|
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
|
||||||
|
# Kovács Zoltán <kovacsz@marcusconsulting.hu>
|
||||||
|
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||||
|
# 2023-06-18 v1.0
|
||||||
|
# new: forked from the "SMARTERP_skeleton" repository.
|
||||||
|
# 2022-12-01 v0.9
|
||||||
|
# fix: Coworker users' support.
|
||||||
|
# The reassign function has been totally rewritten. Now considers only
|
||||||
|
# the object that has actually been changed. Does nothing when the
|
||||||
|
# owner is already the designated role, so doesn't disturb the ERP's
|
||||||
|
# own functionality (e.g. doesn't cause deadlocks).
|
||||||
|
# While it isn't strictly necessary now, transferring the event triggers
|
||||||
|
# to the postgres user has remained, because of better portability.
|
||||||
|
# 2022-11-11 v0.8
|
||||||
|
# new: Coworker users' support.
|
||||||
|
# An event trigger automatism has been set to ensure that the owner of the
|
||||||
|
# objects to be created by coworkers is the database owner. This avoids
|
||||||
|
# permission problems when a foreign role (e.g a DBA or a coworker user)
|
||||||
|
# creates new objects.
|
||||||
|
# fix: Reassigning the restored objects failed when an object had to be
|
||||||
|
# owned by a DBA user (e.g. an event trigger). Now we transfer
|
||||||
|
# these objects' ownership (actually the event trigger objects' only)
|
||||||
|
# to the postgres user before trying the global reassign.
|
||||||
|
# 2022-11-03 v0.7
|
||||||
|
# new: Automatic DB username and password generation if they had not been
|
||||||
|
# specified. NOTE: if the user has just been created, the password appears
|
||||||
|
# in the log as plain text - even if it was given (so it is known).
|
||||||
|
# new: Added a new parameter: -R to create a non-login R/O role, named the same
|
||||||
|
# as the DB username with a "_ro" suffix. The R/O privileges will be granted
|
||||||
|
# automatically to this role. This works the same as the -r parameter but
|
||||||
|
# you don't need to specify the role name.
|
||||||
|
# mod: Now prefers the TCP authentication over the peer authentication.
|
||||||
|
# mod: Improved notification messages.
|
||||||
|
# 2022-04-21 v0.6
|
||||||
|
# mod: R/O role isn't created by default but on demand only.
|
||||||
|
# mod: Enhanced REASSIGN on the just loaded DB to avoid shared memory
|
||||||
|
# exhaustion.
|
||||||
|
# 2022-01-24 v0.5
|
||||||
|
# new: Creates a R/O role as well if it doesn't exist yet.
|
||||||
|
# 2021-10-25 v0.4
|
||||||
|
# fix: A typo has blocked receiving the DB admin username from the environment.
|
||||||
|
# 2021-06-27 v0.3
|
||||||
|
# fix: Honors properly the given DBA credentials.
|
||||||
|
# fix: Handles given but empty parameters better.
|
||||||
|
# mod: Slash is allowed in hostname for Unix sockets.
|
||||||
|
# 2021-01-11 v0.2
|
||||||
|
# fix: The --no-acl option has been added to the pg_restore call.
|
||||||
|
# mod: Honors the DB locale settings (if any) included into dumpfile.
|
||||||
|
# mod: The minimum PostgreSQL version has been relaxed to 9.5.
|
||||||
|
# 2020-12-17 v0.1 Initial release
|
||||||
|
|
||||||
|
# Accepted environment variables and their defaults.
|
||||||
|
#
|
||||||
|
PGCONTAINER=${PGCONTAINER-""} # Docker container's name
|
||||||
|
PGDATABASE=${PGDATABASE-""} # Database name to restore
|
||||||
|
PGDBAUSER=${PGDBAUSER:-""} # Database admin superuser
|
||||||
|
PGDBAPASSWORD=${PGDBAPASSWORD:-""} # Credentials for the DBA user
|
||||||
|
PGDUMPFILE=${PGDUMPFILE-""}          # Dump file pathname
|
||||||
|
PGDUMPFORMAT=${PGDUMPFORMAT:-"c"} # Dump file format
|
||||||
|
PGHOST=${PGHOST:-"localhost"} # Connection parameter
|
||||||
|
PGOPTIONS=${PGOPTIONS-""}            # Options to pass to pg_restore
|
||||||
|
PGPASSWORD=${PGPASSWORD-""} # Credential for the DB owner
|
||||||
|
PGPORT=${PGPORT:-"5432"} # Connection parameter
|
||||||
|
PGROROLE=${PGROROLE:-""} # R/O role to the restored DB
|
||||||
|
PGUSER=${PGUSER:-""} # Owner of the restored DB
|
||||||
|
|
||||||
|
### Temporarily ignored! Needs sanitizing.
|
||||||
|
PGOPTIONS=""
|
||||||
|
|
||||||
|
# Basic environment settings.
|
||||||
|
#
|
||||||
|
LANG=C
|
||||||
|
LC_ALL=C
|
||||||
|
|
||||||
|
# Other initialisations.
|
||||||
|
#
|
||||||
|
LOGSTAMP="\"\$DATE\" +%Y-%m-%d\ %H:%M:%S" # Timestamp format for logs
|
||||||
|
postgres='postgres' # Main DBA user
|
||||||
|
vetodatabases="postgres template0 template1" # Tech DBs aren't to restore
|
||||||
|
# Sets the flag: we need a R/O role as well.
|
||||||
|
[[ -n "$PGROROLE" ]] && PGRONEED='yes' || PGRONEED=''
|
||||||
|
|
||||||
|
# Messages.
|
||||||
|
#
|
||||||
|
MSG_BADDBTYPE="Unknown database type"
|
||||||
|
MSG_BADDUMP="Doesn't exist or isn't a dumpfile:"
|
||||||
|
MSG_BADOPT="Invalid option"
|
||||||
|
MSG_BADPARAM="Doubtful parameter:"
|
||||||
|
MSG_BLOCKING="This is a fatal error - restore has been aborted."
|
||||||
|
MSG_CONNTERM="DB connection(s) have been forced to terminate"
|
||||||
|
MSG_DOCKERGRPNEED="You must be a member of the docker group."
|
||||||
|
MSG_DOESNOTRUN="The database container isn't running:"
|
||||||
|
MSG_EVTCHGFAIL="Failed to change ownership of the event trigger "
|
||||||
|
MSG_EXISTING="did not create existing object"
|
||||||
|
MSG_FAILCONN="Failed to connect to the RDBMS."
|
||||||
|
MSG_MISSINGDEP="Fatal: missing dependency"
|
||||||
|
MSG_NEWPASS="a new password has been set:"
|
||||||
|
MSG_NEWROLE="a new role has been generated:"
|
||||||
|
MSG_NONBLOCKING="Recoverable error - restore is continuing."
|
||||||
|
MSG_OLDRDBMS="RDBMS version is too old"
|
||||||
|
MSG_PEERAUTH="Peer authentication has been used."
|
||||||
|
MSG_PERCENT="Hint: you may use percent-encoding (e.g. %40 instead of @)"
|
||||||
|
MSG_SUPERNEED="user must have DBA (database superuser) privileges."
|
||||||
|
|
||||||
|
MSG_USAGE="Usage: $0 [options] [database [dump_pathname]]\n"
|
||||||
|
MSG_USAGE+="Option:\tENVVAR:\n"
|
||||||
|
MSG_USAGE+=" -A\tPGDBAUSER \tPostgres DB admin superuser\n"
|
||||||
|
MSG_USAGE+=" -a\tPGDBAPASSWORD \tPostgres DB admin password\n"
|
||||||
|
MSG_USAGE+=" -C\tPGCONTAINER \tPostgres Docker container's name\n"
|
||||||
|
MSG_USAGE+=" -d\tPGDATABASE \tPostgres database to restore\n"
|
||||||
|
MSG_USAGE+=" -f\tPGDUMPFILE \tDumpfile pathname\n"
|
||||||
|
#MSG_USAGE+=" -F\tPGDUMPFORMAT \tDumpfile format ($PGDUMPFORMAT)\n"
|
||||||
|
MSG_USAGE+=" -h\tPGHOST \tHostname or IP to connect (localhost)\n"
|
||||||
|
MSG_USAGE+=" -p\tPGPORT \tTCP port to connect (5432)\n"
|
||||||
|
MSG_USAGE+=" -P\tPGPASSWORD \tPostgres password\n"
|
||||||
|
MSG_USAGE+=" -r\tPGROROLE \tPostgres R/O rolename\n"
|
||||||
|
MSG_USAGE+=" -R\t \tPostgres R/O role (names it)\n"
|
||||||
|
MSG_USAGE+=" -U\tPGUSER \tPostgres username\n"
|
||||||
|
|
||||||
|
# Getting options.
|
||||||
|
#
|
||||||
|
while getopts ":-:a:A:C:d:D:f:F:h:H:p:P:r:Ru:U:" option
|
||||||
|
do
|
||||||
|
case ${option} in
|
||||||
|
"-" )
|
||||||
|
if [ "$OPTARG" = "help" ]; then echo -e "$MSG_USAGE" >&2; exit
|
||||||
|
else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
"A" ) PGDBAUSER="$OPTARG" ;;
|
||||||
|
"a" ) PGDBAPASSWORD="$OPTARG" ;;
|
||||||
|
"C" ) PGCONTAINER="$OPTARG" ;;
|
||||||
|
"d" | "D" ) PGDATABASE="$OPTARG" ;;
|
||||||
|
"f" ) PGDUMPFILE="$OPTARG" ;;
|
||||||
|
"F" ) PGDUMPFORMAT="$OPTARG" ;;
|
||||||
|
"h" | "H" ) PGHOST="$OPTARG" ;;
|
||||||
|
"P" ) PGPASSWORD="$OPTARG" ;;
|
||||||
|
"p" ) PGPORT="$OPTARG" ;;
|
||||||
|
"r" ) PGRONEED="yes"; PGROROLE="$OPTARG" ;;
|
||||||
|
"R" ) PGRONEED="yes" ;;
|
||||||
|
"u" | "U" ) PGUSER="$OPTARG" ;;
|
||||||
|
\? ) echo "$MSG_BADOPT -$OPTARG" >&2; exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done; shift $((OPTIND -1))
|
||||||
|
# All options have been processed.
|
||||||
|
|
||||||
|
# Checks the dependencies.
|
||||||
|
#
|
||||||
|
# Conditional dependencies (according to native or dockerized environment).
|
||||||
|
[[ -z "$PGCONTAINER" ]] \
|
||||||
|
&& additem="psql pg_restore" \
|
||||||
|
|| additem="docker"
|
||||||
|
# Common dependencies.
|
||||||
|
TR=$(which tr 2>/dev/null)
|
||||||
|
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
|
||||||
|
for item in basename cat cut dd date dirname grep head id locale readlink sed \
|
||||||
|
tail tee $additem
|
||||||
|
do
|
||||||
|
if [ -n "$(which $item)" ]
|
||||||
|
then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
|
||||||
|
else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
|
||||||
|
done
|
||||||
|
# All dependencies are available via "$THECOMMAND" (upper case) call.
|
||||||
|
|
||||||
|
# Sanitizing the parameters.
|
||||||
|
#
|
||||||
|
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html (modded)
|
||||||
|
[[ -n "$PGDBAUSER" ]] && [[ ! "$PGDBAUSER" =~ ^([[:alnum:]]|[.-_\\+])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGDBAUSER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# This is only an arbitrary restriction, partially enforces percent-encoding.
|
||||||
|
[[ -n "$PGDBAPASSWORD" ]] && [[ ! "$PGDBAPASSWORD" =~ ^([[:alnum:]]|[ !~&#$<>()%+-_.])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGDBAPASSWORD\n$MSG_PERCENT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html (modded)
|
||||||
|
[[ -n "$PGCONTAINER" ]] && [[ ! "$PGCONTAINER" =~ ^([[:alnum:]]|[-_])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGCONTAINER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html (modded)
|
||||||
|
[[ -n "$PGDATABASE" ]] && [[ ! "$PGDATABASE" =~ ^([[:alnum:]]|[_])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGDATABASE\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# This is only an arbitrary restriction.
|
||||||
|
[[ -n "$PGDUMPFILE" ]] && [[ ! "$PGDUMPFILE" =~ ^([[:alnum:]]|[ .-_/])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGDUMPFILE\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://tools.ietf.org/html/rfc1123#page-13 (relaxed)
|
||||||
|
[[ -n "$PGHOST" ]] && [[ ! "$PGHOST" =~ ^([[:alnum:]]|[.-/])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGHOST\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://tools.ietf.org/html/rfc6056 (relaxed)
|
||||||
|
[[ -z "$PGPORT" ]] && PGPORT="5432"
|
||||||
|
[[ -n "$PGPORT" ]] && [[ ! "$PGPORT" =~ ^[1-9]([[:digit:]]){0,4}$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGPORT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# This is only an arbitrary restriction, partially enforces percent-encoding.
|
||||||
|
[[ -n "$PGPASSWORD" ]] && [[ ! "$PGPASSWORD" =~ ^([[:alnum:]]|[ !~&#$<>()%+-_.])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGPASSWORD\n$MSG_PERCENT\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html (modded)
|
||||||
|
[[ -n "$PGROROLE" ]] && [[ ! "$PGROROLE" =~ ^([[:alnum:]]|[.-_\\+])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGROROLE\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html (modded)
|
||||||
|
[[ -n "$PGUSER" ]] && [[ ! "$PGUSER" =~ ^([[:alnum:]]|[.-_\\+])*$ ]] \
|
||||||
|
&& echo -e "$MSG_BADPARAM $PGUSER\n$MSG_USAGE" >&2 && exit 1
|
||||||
|
# We've at least minimally checked the parameters.
|
||||||
|
|
||||||
|
# Need to be root or a Docker manager user if the DB runs in a container.
|
||||||
|
#
|
||||||
|
[[ -n "$PGCONTAINER" ]] && [[ "$USER" != 'root' ]] \
|
||||||
|
&& [[ -z "$(echo "$("$ID" -Gn "$USER") " | "$GREP" ' docker ')" ]] \
|
||||||
|
&& echo "$MSG_DOCKERGRPNEED" >&2 && exit 1 #"
|
||||||
|
|
||||||
|
# If the PostgreSQL is dockerized the container must be running.
|
||||||
|
#
|
||||||
|
[[ -n "$PGCONTAINER" ]] \
|
||||||
|
&& [[ -z "$("$DOCKER" ps -q -f name=$PGCONTAINER)" ]] \
|
||||||
|
&& echo "$MSG_DOESNOTRUN $PGCONTAINER" >&2 && exit 1
|
||||||
|
|
||||||
|
# Determines the database to restore.
|
||||||
|
#
|
||||||
|
# Lacking -d, the 1st non-option parameter is the database's name.
|
||||||
|
if [ -z "$PGDATABASE" -a -n "$1" ]; then PGDATABASE="$1"; shift; fi
|
||||||
|
# The database's name is mandatory.
|
||||||
|
if [ -z "$PGDATABASE" ]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# A humble sanitization.
|
||||||
|
if [[ ! "$PGDATABASE" =~ ^([[:alnum:]]|[_])*$ ]]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# Silently refuses the PostgreSQL internal databases.
|
||||||
|
for veto in $vetodatabases ""
|
||||||
|
do
|
||||||
|
[[ "$PGDATABASE" = "$veto" ]] && exit 0
|
||||||
|
done
|
||||||
|
# We've a database name to restore.
|
||||||
|
|
||||||
|
# Determines the database owner's username.
|
||||||
|
#
|
||||||
|
# If it isn't given, we suppose the name is identical to the database's name.
|
||||||
|
if [ -z "$PGUSER" ]; then
|
||||||
|
PGUSER="$PGDATABASE"
|
||||||
|
# We'll generate a random password for this user. This will be relevant only
|
||||||
|
# when it doesn't exist and we need to create it.
|
||||||
|
if [ -z "$PGPASSWORD" ]; then
|
||||||
|
PGPASSWORD=$("$DD" if=/dev/urandom bs=64 count=1 2>/dev/null | "$TR" -dc 'a-zA-Z0-9')
|
||||||
|
PGPASSWORD=${PGPASSWORD:0:16}
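# Illustrative sketch: a 64-byte block of /dev/urandom is reduced to its
# alphanumeric characters and the first 16 of them form the password,
# e.g. something like "qX3v9ZkTm2RbLw7a".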
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# We've the database owner's username.
|
||||||
|
|
||||||
|
# Determines the R/O database role's username.
|
||||||
|
#
|
||||||
|
# If it isn't given we use the owner's name appended with a "_ro" postfix.
|
||||||
|
[[ -z "$PGROROLE" ]] && PGROROLE="${PGUSER}_ro"
|
||||||
|
# We've the R/O role's name (but maybe we will not use it).
|
||||||
|
|
||||||
|
# Determines the dumpfile.
|
||||||
|
#
|
||||||
|
# Lacking -f, the 2nd non-option parameter is the dumpfile's pathname.
|
||||||
|
if [ -z "$PGDUMPFILE" -a -n "$1" ]; then PGDUMPFILE="$1"; shift; fi
|
||||||
|
# The dumpfile is mandatory.
|
||||||
|
if [ -z "$PGDUMPFILE" ]
|
||||||
|
then echo -e "$MSG_USAGE" >&2; exit 1; fi
|
||||||
|
# The PGDUMPFILE must point to a readable file.
|
||||||
|
# If it is an existing symlink dereferences it to ensure, it points to a file.
|
||||||
|
if [ -h "$PGDUMPFILE" ]; then
|
||||||
|
if [[ "$("$READLINK" "$PGDUMPFILE")" != /* ]]
|
||||||
|
# relative path in symlink
|
||||||
|
then PGDUMPFILE="$("$DIRNAME" "$PGDUMPFILE")/$("$READLINK" "$PGDUMPFILE")"
|
||||||
|
# absolute path in symlink
|
||||||
|
else PGDUMPFILE="$("$READLINK" "$PGDUMPFILE")"; fi
|
||||||
|
fi
|
||||||
|
# Let's check it!
|
||||||
|
if [ ! -r "$PGDUMPFILE" -o ! -f "$PGDUMPFILE" ]
|
||||||
|
then echo -e "$MSG_BADDUMP $PGDUMPFILE"; exit 1; fi
|
||||||
|
# We've an existing dumpfile.
|
||||||
|
|
||||||
|
# Tries to get the locale settings of this dump.
|
||||||
|
#
|
||||||
|
PGDUMPPROPS=""
|
||||||
|
LOCALES=$("$LOCALE" -a | "$TR" [:upper:] [:lower:])
|
||||||
|
[[ -z "$PG_RESTORE" ]] && PG_RESTORE="$(which pg_restore)"
|
||||||
|
if [ -n "$PG_RESTORE" -a -x "$PG_RESTORE" ]; then
|
||||||
|
# Gets the CREATE DATABASE line and parses it.
|
||||||
|
createline=$("$PG_RESTORE" -C -s -f - "$PGDUMPFILE" | \
|
||||||
|
"$GREP" -i 'create database')
|
||||||
|
# Encoding (always).
|
||||||
|
property=$(echo "$createline" | \
|
||||||
|
"$SED" -n "s/^CREATE DATABASE .* ENCODING = \('*[[:alnum:]]*'*\).*$/\1/ip")
|
||||||
|
[[ -n "$property" ]] && \
|
||||||
|
PGDUMPPROPS+=" ENCODING = $property"
|
||||||
|
# Collation (only if it is available in current locales).
|
||||||
|
property=$(echo "$createline" | \
|
||||||
|
"$SED" -n "s/^CREATE DATABASE .* LC_COLLATE = \('*[[:alnum:]._-]*'*\).*$/\1/ip")
|
||||||
|
# Tricky because of slightly different locale naming in Linux and Postgres.
|
||||||
|
[[ -n "$property" ]] && \
|
||||||
|
[[ " $(echo $LOCALES) " =~ " $(echo "$property" | "$SED" 's/utf-8/utf8/i' | \
|
||||||
|
"$TR" -d \' | "$TR" [:upper:] [:lower:]) " ]] && \
|
||||||
|
PGDUMPPROPS+=" LC_COLLATE = $property"
|
||||||
|
# CType (only if it is available in current locales).
|
||||||
|
property=$(echo "$createline" | \
|
||||||
|
"$SED" -n "s/^CREATE DATABASE .* LC_CTYPE = \('*[[:alnum:]._-]*'*\).*$/\1/ip")
|
||||||
|
# Tricky because of slightly different locale naming in Linux and Postgres.
|
||||||
|
[[ -n "$property" ]] && \
|
||||||
|
[[ " $(echo $LOCALES) " =~ " $(echo "$property" | "$SED" 's/utf-8/utf8/i' | \
|
||||||
|
"$TR" -d \' | "$TR" [:upper:] [:lower:]) " ]] && \
|
||||||
|
PGDUMPPROPS+=" LC_CTYPE = $property"
|
||||||
|
fi
|
||||||
|
# Maybe we have a guess about the dump's encoding.
|
||||||
|
|
||||||
|
# Finds the LOGFILE to use.
|
||||||
|
#
|
||||||
|
# If the folder containing the PGDUMPFILE is writable, we will use a
|
||||||
|
# logfile with the same name as the dumpfile but with .log extension.
|
||||||
|
[[ -w "$("$DIRNAME" "$PGDUMPFILE")" ]] \
|
||||||
|
&& LOGFILE="${PGDUMPFILE%.*}.log" \
|
||||||
|
|| LOGFILE="/dev/null"
|
||||||
|
# We've a suitable logfile.
|
||||||
|
|
||||||
|
# Opens the log and takes care to close it when finished.
|
||||||
|
#
|
||||||
|
echo "$(eval $LOGSTAMP) Starting job #$$ $("$TR" '\0' ' ' < /proc/$$/cmdline)" | \
|
||||||
|
"$TEE" -a "$LOGFILE"
|
||||||
|
# Sets a trap to make always a corresponding exit log entry as well.
|
||||||
|
function close_log() {
|
||||||
|
echo -e "$(eval $LOGSTAMP) Finished job #$$ $("$TR" '\0' ' ' < /proc/$$/cmdline)\n" | \
|
||||||
|
"$TEE" -a "$LOGFILE"
|
||||||
|
}
|
||||||
|
trap -- 'close_log' EXIT
|
||||||
|
# We started logging.
|
||||||
|
|
||||||
|
# Prepopulates the SQL command skeleton (macro).
|
||||||
|
#
|
||||||
|
# This skeleton makes the SQL calls independent of the environment
|
||||||
|
# (native or dockerized) and credentials. We need only actualize the
|
||||||
|
# CONNECT, DATABASE and SQLVERB clauses then eval $DO_SQLVERB.
|
||||||
|
# Warning: the parameters must have been sanitized!
|
||||||
|
DO_SQLVERB=""
|
||||||
|
DO_SQLVERB+="export PGPASSWORD=\"\$PGDBAPASSWORD\"; "
|
||||||
|
DO_SQLVERB+="echo -e \"\$SQLVERB\" | "
|
||||||
|
DO_SQLVERB+="\"\$PSQL\" \$CONNECT -t -d \$DATABASE "
|
||||||
|
# We've a suitable SQL macro.
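# Illustration only (mirroring the calls below): after actualizing the clauses,
# a probe like
#   DATABASE="postgres"; SQLVERB="SELECT 1;"
#   result=$(eval "$DO_SQLVERB" 2>/dev/null)
# runs the verb through psql with the current CONNECT settings and credentials.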
|
||||||
|
|
||||||
|
# Do we connect the database as a DBA?
|
||||||
|
#
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="SELECT 1;"
|
||||||
|
# Sets the default DBA username for dockerized and native RDBMS as well.
|
||||||
|
if [ -z "$PGDBAUSER" ]; then
|
||||||
|
[[ -n "$PGACONTAINER" ]] \
|
||||||
|
&& PGDBAUSER="postgres" \
|
||||||
|
|| PGDBAUSER="$USER"
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
# We'll try the TCP connection first.
|
||||||
|
CONNECT="-U $PGDBAUSER -w -h $PGHOST -p $PGPORT"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2>/dev/null)
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
if [ "$result" != "1" ]; then
|
||||||
|
# On failure we'll try the local connection (Unix-domain socket) as well.
|
||||||
|
CONNECT=""
|
||||||
|
result=$(eval "$DO_SQLVERB" 2>/dev/null); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ "$result" != "1" ]] \
|
||||||
|
&& echo -e "$MSG_FAILCONN" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# Leaves a warning about using the peer authentication.
|
||||||
|
echo -e "$MSG_PEERAUTH" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
fi
|
||||||
|
# We've a valid CONNECT clause.
|
||||||
|
|
||||||
|
# Checks the superuser privilege.
|
||||||
|
#
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="SHOW is_superuser;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
[[ "$result" != "on" ]] \
|
||||||
|
&& echo -e "$PGDBAUSER $MSG_SUPERNEED" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# We're a DB superuser.
|
||||||
|
|
||||||
|
# Checks the PostgreSQL version - 9.5 or later needed.
|
||||||
|
#
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="SELECT current_setting('server_version_num')::INTEGER;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
(( result < 90500 )) \
|
||||||
|
&& echo -e "$MSG_OLDRDBMS: $result" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# RDBMS version is proper.
|
||||||
|
|
||||||
|
# Creates the database user (owner) if it doesn't exist.
|
||||||
|
#
|
||||||
|
echo -e "CREATE ROLE" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="
|
||||||
|
DO LANGUAGE plpgsql \$\$
|
||||||
|
BEGIN
|
||||||
|
IF NOT EXISTS (SELECT * FROM pg_user WHERE usename = '$PGUSER') THEN
|
||||||
|
CREATE ROLE $PGUSER WITH LOGIN ENCRYPTED PASSWORD '$PGPASSWORD';
|
||||||
|
RAISE NOTICE '$MSG_NEWROLE $PGUSER';
|
||||||
|
RAISE NOTICE '$MSG_NEWPASS $PGPASSWORD';
|
||||||
|
ELSE
|
||||||
|
RAISE NOTICE '$MSG_EXISTING $PGUSER';
|
||||||
|
END IF;
|
||||||
|
END;
|
||||||
|
\$\$;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# We've the database user.
|
||||||
|
|
||||||
|
# On demand creates a (non-login) R/O role as well if it doesn't exist.
|
||||||
|
#
|
||||||
|
if [ -n "$PGRONEED" ]; then
|
||||||
|
echo -e "CREATE ROLE (R/O)" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="
|
||||||
|
DO LANGUAGE plpgsql \$\$
|
||||||
|
BEGIN
|
||||||
|
IF NOT EXISTS (SELECT * FROM pg_roles WHERE rolname = '$PGROROLE') THEN
|
||||||
|
CREATE ROLE $PGROROLE
|
||||||
|
NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE NOREPLICATION;
|
||||||
|
RAISE NOTICE '$MSG_NEWROLE $PGROROLE';
|
||||||
|
ELSE
|
||||||
|
RAISE NOTICE '$MSG_EXISTING $PGROROLE';
|
||||||
|
END IF;
|
||||||
|
END;
|
||||||
|
\$\$; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
# We don't consider the failure as blocking.
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# We hope that we've the R/O role defined.
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Drops all existing connections to the database being restored,
|
||||||
|
# then (re)creates the database from scratch using template0.
|
||||||
|
#
|
||||||
|
echo -e "DROP DATABASE" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="
|
||||||
|
DO LANGUAGE plpgsql \$\$
|
||||||
|
DECLARE conn_terminated SMALLINT;
|
||||||
|
BEGIN
|
||||||
|
SELECT COUNT(pg_terminate_backend(pid))
|
||||||
|
FROM pg_stat_activity
|
||||||
|
INTO conn_terminated
|
||||||
|
WHERE datname='$PGDATABASE';
|
||||||
|
IF conn_terminated > 0 THEN
|
||||||
|
RAISE NOTICE '% $MSG_CONNTERM', conn_terminated ;
|
||||||
|
END IF;
|
||||||
|
END
|
||||||
|
\$\$; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
#
|
||||||
|
# Drops the database.
|
||||||
|
#
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="DROP DATABASE IF EXISTS $PGDATABASE; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
#
|
||||||
|
# Recreates the database, honors the locale properties (if any).
|
||||||
|
#
|
||||||
|
echo -e "CREATE DATABASE" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="postgres"
|
||||||
|
SQLVERB="CREATE DATABASE $PGDATABASE "
|
||||||
|
SQLVERB+="WITH TEMPLATE = template0 OWNER = $PGUSER $PGDUMPPROPS; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_BLOCKING" | "$TEE" -a "$LOGFILE" >&2 \
|
||||||
|
&& exit 1
|
||||||
|
# We've an empty database.
|
||||||
|
|
||||||
|
# Grants all privileges on this database, and transfers the public
|
||||||
|
# schema's ownership to the database user.
|
||||||
|
#
|
||||||
|
echo -e "GRANT" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="$PGDATABASE"
|
||||||
|
SQLVERB="GRANT ALL PRIVILEGES ON DATABASE $PGDATABASE TO $PGUSER; "
|
||||||
|
SQLVERB+="ALTER SCHEMA public OWNER TO $PGUSER; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# Ownership and grants have been set on the empty database.
|
||||||
|
|
||||||
|
# Restores the database from the dump.
|
||||||
|
#
|
||||||
|
echo -e "RESTORE" | "$TEE" -a "$LOGFILE"
|
||||||
|
if [ -n "$PGCONTAINER" ]; then
|
||||||
|
echo "PSQL dockerized - TODO!"
|
||||||
|
else
|
||||||
|
export PGPASSWORD="$PGDBAPASSWORD"
|
||||||
|
"$PG_RESTORE" $CONNECT \
|
||||||
|
--no-owner --no-acl --disable-triggers \
|
||||||
|
-d $PGDATABASE "$PGDUMPFILE" \
|
||||||
|
>/dev/null 2> >("$TEE" -a "$LOGFILE" >&2)
|
||||||
|
fi
|
||||||
|
# Unfortunately the result code doesn't differentiate the
|
||||||
|
# blocking and non-blocking states.
|
||||||
|
[[ $? -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# We've made an attempt to restore the database - the result isn't strictly defined.
|
||||||
|
|
||||||
|
# Tries to transfer the ownership of the restored objects to the database user,
|
||||||
|
# but we've some exceptions to deal with first.
|
||||||
|
echo -e "REASSIGN EVENT TRIGGERS" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="$PGDATABASE"
|
||||||
|
# The event triggers (if any) have to be owned by a DBA user.
|
||||||
|
# We try to transfer the ownership of this trigger to the postgres user,
|
||||||
|
# whom by default we will not use for daily operations.
|
||||||
|
SQLVERB="
|
||||||
|
DO LANGUAGE plpgsql \$\$
|
||||||
|
DECLARE evtrg text;
|
||||||
|
BEGIN
|
||||||
|
FOR evtrg IN EXECUTE 'SELECT evtname FROM pg_event_trigger'
|
||||||
|
LOOP
|
||||||
|
BEGIN
|
||||||
|
EXECUTE 'ALTER EVENT TRIGGER ' || evtrg || ' OWNER TO postgres';
|
||||||
|
-- EXCEPTION
|
||||||
|
-- WHEN others THEN RAISE NOTICE '$MSG_EVTCHGFAIL %', evtrg;
|
||||||
|
END;
|
||||||
|
END LOOP;
|
||||||
|
END;
|
||||||
|
\$\$;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# Exceptions have been dealt with; we're moving to the regular objects.
|
||||||
|
#
|
||||||
|
# To avoid exhausting the shared memory or running out from
|
||||||
|
# max_locks_per_transaction first we'll enumerate all the tables and change
|
||||||
|
# their owners one by one.
|
||||||
|
echo -e "REASSIGN" | "$TEE" -a "$LOGFILE"
|
||||||
|
# Creates separate ALTER commands for each table (in all non-technical schemas).
|
||||||
|
SQLVERB="SELECT 'ALTER TABLE '|| schemaname || '.' || tablename || ' OWNER TO $PGUSER;' "
|
||||||
|
SQLVERB+="FROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema') "
|
||||||
|
SQLVERB+="ORDER BY schemaname, tablename; "
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
if [ $excode -ne 0 ]; then
|
||||||
|
# On error simply skips this step.
|
||||||
|
echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
else
|
||||||
|
# Runs the generated ALTER commands.
|
||||||
|
SQLVERB="$result"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
# Reassigns all remaining objects at once.
|
||||||
|
SQLVERB="REASSIGN OWNED BY CURRENT_USER TO $PGUSER;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# Ownership and R/W privileges have been set.
|
||||||
|
|
||||||
|
# Sets a function and a trigger to ensure that objects created in the future
|
||||||
|
# will be owned by the database owner. This avoids permission problems, when
|
||||||
|
# a foreign role (e.g. a DBA) modifies any objects.
|
||||||
|
# Based on https://stackoverflow.com/questions/64046147
|
||||||
|
#
|
||||||
|
echo -e "SET EVENT TRIGGER" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="$PGDATABASE"
|
||||||
|
# The automatic reassign procedure for a future use.
|
||||||
|
SQLVERB="
|
||||||
|
CREATE OR REPLACE FUNCTION public.trg_set_owner()
|
||||||
|
RETURNS event_trigger
|
||||||
|
LANGUAGE plpgsql
|
||||||
|
AS \$\$
|
||||||
|
DECLARE
|
||||||
|
event_tuple record;
|
||||||
|
schema varchar;
|
||||||
|
name varchar;
|
||||||
|
kind varchar;
|
||||||
|
owner varchar;
|
||||||
|
BEGIN
|
||||||
|
-- Enumerates the queries involved in this event.
|
||||||
|
FOR event_tuple IN SELECT * FROM pg_event_trigger_ddl_commands()
|
||||||
|
LOOP
|
||||||
|
-- Retrieves the relevant properties of the object.
|
||||||
|
SELECT relnamespace::regnamespace::name,
|
||||||
|
relname::text,
|
||||||
|
relkind,
|
||||||
|
relowner::regrole
|
||||||
|
INTO schema, name, kind, owner
|
||||||
|
FROM pg_catalog.pg_class
|
||||||
|
WHERE oid = event_tuple.objid;
|
||||||
|
IF NOT owner = '$PGUSER' THEN
|
||||||
|
-- Reassigns this object.
|
||||||
|
CASE
|
||||||
|
WHEN kind = 'r' THEN
|
||||||
|
EXECUTE 'ALTER TABLE IF EXISTS ' || schema || '.' || name || ' OWNER TO $PGUSER';
|
||||||
|
WHEN kind = 'v' OR kind = 'm' THEN
|
||||||
|
EXECUTE 'ALTER VIEW IF EXISTS ' || schema || '.' || name || ' OWNER TO $PGUSER';
|
||||||
|
END CASE;
|
||||||
|
END IF;
|
||||||
|
END LOOP;
|
||||||
|
END;
|
||||||
|
\$\$; "
|
||||||
|
# The event trigger which will call the procedure above.
|
||||||
|
SQLVERB+="
|
||||||
|
DROP EVENT TRIGGER IF EXISTS trg_set_owner;
|
||||||
|
CREATE EVENT TRIGGER trg_set_owner
|
||||||
|
ON ddl_command_end
|
||||||
|
WHEN tag IN ('CREATE TABLE', 'CREATE TABLE AS', 'CREATE VIEW')
|
||||||
|
EXECUTE PROCEDURE public.trg_set_owner(); "
|
||||||
|
# Transfers the ownership of this trigger to the postgres DBA user,
|
||||||
|
# whom by default we will not use for daily operations.
|
||||||
|
SQLVERB+="
|
||||||
|
ALTER EVENT TRIGGER trg_set_owner OWNER TO postgres; "
|
||||||
|
# Let's execute (failure doesn't block).
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# Done with the reassign automatism.
|
||||||
|
|
||||||
|
# Grants SELECT on all tables and sequences in all schemas to the R/O role (if it does exist).
|
||||||
|
# https://dba.stackexchange.com/questions/95867/grant-usage-on-all-schemas-in-a-database
|
||||||
|
#
|
||||||
|
if [ -n "$PGRONEED" ]; then
|
||||||
|
echo -e "GRANT SELECT (R/O)" | "$TEE" -a "$LOGFILE"
|
||||||
|
DATABASE="$PGDATABASE"
|
||||||
|
SQLVERB="GRANT CONNECT ON DATABASE $PGDATABASE TO ${PGROROLE}; "
|
||||||
|
SQLVERB+="
|
||||||
|
DO LANGUAGE plpgsql \$do\$
|
||||||
|
DECLARE
|
||||||
|
sch text;
|
||||||
|
BEGIN
|
||||||
|
FOR sch IN SELECT nspname FROM pg_namespace where nspname != 'pg_toast'
|
||||||
|
and nspname != 'pg_temp_1' and nspname != 'pg_toast_temp_1'
|
||||||
|
and nspname != 'pg_statistic' and nspname != 'pg_catalog'
|
||||||
|
and nspname != 'information_schema'
|
||||||
|
LOOP
|
||||||
|
EXECUTE format(\$\$ GRANT USAGE ON SCHEMA %I TO ${PGROROLE} \$\$, sch);
|
||||||
|
EXECUTE format(\$\$ GRANT SELECT ON ALL TABLES IN SCHEMA %I TO ${PGROROLE} \$\$, sch);
|
||||||
|
EXECUTE format(\$\$ GRANT SELECT ON ALL SEQUENCES IN SCHEMA %I TO ${PGROROLE} \$\$, sch);
|
||||||
|
EXECUTE format(\$\$ ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT SELECT ON TABLES TO ${PGROROLE} \$\$, sch);
|
||||||
|
EXECUTE format(\$\$ ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT SELECT ON SEQUENCES TO ${PGROROLE} \$\$, sch);
|
||||||
|
END LOOP;
|
||||||
|
END;
|
||||||
|
\$do\$;"
|
||||||
|
result=$(eval "$DO_SQLVERB" 2> >("$TEE" -a "$LOGFILE" >&2)); excode=$?
|
||||||
|
result="${result//[[:space:]]/}"
|
||||||
|
[[ $excode -ne 0 ]] \
|
||||||
|
&& echo -e "$MSG_NONBLOCKING" | "$TEE" -a "$LOGFILE" >&2
|
||||||
|
# R/O grants have been set.
|
||||||
|
fi
|
||||||
|
# Done with grants.
|
||||||
|
|
||||||
|
# Done with restore.
|
||||||
|
|
||||||
|
# Closing log entry will be handled via EXIT trap.
|
||||||
|
#
|
||||||
|
# That's all, Folks! :)
|
431
.templates/bin/rotate_folder
Executable file
@ -0,0 +1,431 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Helper script to rotate contents of a folder with a daily-weekly-monthly plan.
|
||||||
|
# TL;DR: rotate_folder -f path/to/my/folder # Shows what would be done.
|
||||||
|
# rotate_folder -f path/to/my/folder --doit # Makes the job.
|
||||||
|
#
|
||||||
|
# By default the script makes a dry run - doesn't delete anything, only lists
|
||||||
|
# the operations would be done. You may force the execution by --doit command
|
||||||
|
# line parameter. Another optional command line parameter is -f followed by
|
||||||
|
# the pathname of the folder intended to be rotated. Lacking it, the script
|
||||||
|
# accepts the first non-option argument as the folder (no pwd default).
|
||||||
|
#
|
||||||
|
# You may configure the script by environment variables and/or by a
|
||||||
|
# configuration textfile. This file should be placed into the folder intended
|
||||||
|
# to be rotated. Its name should be a dot followed by the script's name and a
|
||||||
|
# .conf extension (.rotate_folder.conf by default). The script will create a
|
||||||
|
# default config file automatically on first (dry) run, if it doesn't exist.
|
||||||
|
#
|
||||||
|
# The configurable parameters and their defaults are:
|
||||||
|
# BACKUP_FOLDER="" # pathname of the folder intended to be rotated
|
||||||
|
# CLASSES_PATTERN="" # see below
|
||||||
|
# DOIT="" # if empty the script makes a dry run
|
||||||
|
# RETAIN_DAYS=7 # retains all files created within that many days
|
||||||
|
# RETAIN_WEEKS=4 # retains one file per week/month,
|
||||||
|
# RETAIN_MONTHS=12 # created within that many weeks/months
|
||||||
|
#
|
||||||
|
# If you specify a CLASSES_PATTERN the script will classify the files in folder
|
||||||
|
# and rotate the files class by class independently. A pattern is a regexp:
|
||||||
|
# * the script considers only the filenames matching the whole regexp;
|
||||||
|
# * the regexp must contain parts in capturing parentheses (classifiers).
|
||||||
|
# A class is a set of filenames where the parts matching all the classifiers
|
||||||
|
# are the same. For example, if CLASSES_PATTERN='^(.*)-[0-9].tgz'
|
||||||
|
# then "alpha-1.tgz alpha-2.tgz ... alpha-9.tgz" are members of a class;
|
||||||
|
# "beta-1.tgz beta-2.tgz ... beta-9.tgz" are members of another class.
|
||||||
|
# "beta-10.tgz gamma-1.log" won't be processed beacuse they don't match
|
||||||
|
# the pattern at all.
|
||||||
|
# In this example the "alpha" and "beta" files will be rotated independently.
|
||||||
|
#
|
||||||
|
# The rotating rules are:
|
||||||
|
# * all files created within RETAIN_DAYS will be retained.
|
||||||
|
# * furthermore from files created within RETAIN_WEEKS, only one file
|
||||||
|
# (the oldest) will be retained for every 7 days period.
|
||||||
|
# * furthermore from files created within RETAIN_MONTHS, only one file
|
||||||
|
# (the oldest) will be retained for every 30 days period.
|
||||||
|
#
|
||||||
|
# On dry run the script lists all the files of the class with following
|
||||||
|
# abbreviations:
|
||||||
|
# DR filename - would be retained by daily rule
|
||||||
|
# WR filename - would be retained by weekly rule
|
||||||
|
# WX filename - would be deleted by weekly rule
|
||||||
|
# MR filename - would be retained by monthly rule
|
||||||
|
# MX filename - would be deleted by monthly rule
|
||||||
|
# AX filename - would be deleted, no rule matches it because it is too old
|
||||||
|
#
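# A minimal usage sketch (hypothetical folder and pattern; dry run first):
#   CLASSES_PATTERN='^(.*)\.(tgz|log)$' rotate_folder -f /srv/backups/tarballs
#   CLASSES_PATTERN='^(.*)\.(tgz|log)$' rotate_folder -f /srv/backups/tarballs --doit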
|
||||||
|
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
|
||||||
|
# Kovács Zoltán <kovacsz@marcusconsulting.hu>
|
||||||
|
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||||
|
# 2023-06-18 v1.0
|
||||||
|
# new: forked from the "SMARTERP_skeleton" repository.
|
||||||
|
# 2021.02.12 v0.3
|
||||||
|
# add: Multiple classes (mainly rewritten).
|
||||||
|
# mod: Accepts the first command line parameter as a folder (doesn't
|
||||||
|
# need the -f option). But the folder doesn't defaults to the $PWD.
|
||||||
|
# 2020-11-24 v0.2
|
||||||
|
# fix: Typos.
|
||||||
|
# mod: Warnings also go to the STDERR.
|
||||||
|
# 2020-11-02 v0.1 Initial release
|
||||||
|
|
||||||
|
# Accepted environment variables and their defaults.
|
||||||
|
#
|
||||||
|
BACKUP_FOLDER=${BACKUP_FOLDER-""}
|
||||||
|
CLASSES_PATTERN=${CLASSES_PATTERN-""}
|
||||||
|
RETAIN_DAYS=${RETAIN_DAYS-"7"}
|
||||||
|
RETAIN_WEEKS=${RETAIN_WEEKS-"4"}
|
||||||
|
RETAIN_MONTHS=${RETAIN_MONTHS-"12"}
|
||||||
|
|
||||||
|
# Other initialisations (maybe overridden by configuration).
|
||||||
|
#
|
||||||
|
DOIT=""
|
||||||
|
|
||||||
|
# Messages (maybe overriden by configuration).
|
||||||
|
#
|
||||||
|
MSG_BADFOLDER="Doesn't exist or doesn't writable"
|
||||||
|
MSG_BADOPT="Invalid option"
|
||||||
|
MSG_BADPATTERN="The pattern given seems to be illegal"
|
||||||
|
MSG_CREATED="A new, empty configuration has been created.\n"
|
||||||
|
MSG_CREATED+="Feel free to fill in and rerun this program!\n"
|
||||||
|
MSG_CREATED+="You may force the execution unconfigurated with --doit option."
|
||||||
|
MSG_DELDRY="Dry run - these files would have been deleted:"
|
||||||
|
MSG_DELREAL="These files have been deleted:"
|
||||||
|
MSG_FAILCREATE="Failed to create a new, empty configuration file.\n"
|
||||||
|
MSG_FAILCREATE+="You may force the execution unconfigurated with --doit option."
|
||||||
|
MSG_MISSINGDEP="Fatal: missing dependency"
|
||||||
|
MSG_NOCONF="Didn't find the configuration file"
|
||||||
|
MSG_NOCLASSES="Didn't find suitable classes according to pattern"
|
||||||
|
MSG_NOFILES="Didn't found files to rotate."
|
||||||
|
MSG_SCHEDULE="Dry run - this is the schedule:"
|
||||||
|
MSG_TODOIT="Dry run - you may force the execution with --doit option."
|
||||||
|
|
||||||
|
# There is nothing to configure below (I hope).
|
||||||
|
###############################################
|
||||||
|
|
||||||
|
# Getting command line options.
|
||||||
|
while getopts ":-:f:" option
|
||||||
|
do
|
||||||
|
case ${option} in
|
||||||
|
"-" )
|
||||||
|
if [ "$OPTARG" = "doit" ]; then DOIT="yes"
|
||||||
|
else echo "$MSG_BADOPT --$OPTARG" >&2; exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
"f" ) BACKUP_FOLDER="$OPTARG" ;;
|
||||||
|
\? ) echo "$MSG_BADOPT -$OPTARG" >&2; exit 1 ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
# Done with options.
|
||||||
|
|
||||||
|
# Checks the dependencies.
|
||||||
|
TR=$(which tr 2>/dev/null)
|
||||||
|
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
|
||||||
|
for item in basename date dirname egrep sed seq sort stat xargs
|
||||||
|
do
|
||||||
|
if [ -n "$(which $item)" ]
|
||||||
|
then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
|
||||||
|
else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
|
||||||
|
done
|
||||||
|
# All dependencies are available via "$THECOMMAND" (upper case) call.
|
||||||
|
|
||||||
|
# Checks the backup folder.
|
||||||
|
# If wasn't defined yet accepts the 1st command line parameter as well.
|
||||||
|
if [ -z "$BACKUP_FOLDER" ]; then BACKUP_FOLDER="$1"; shift; fi
|
||||||
|
# Removes the trailing slash (if any).
|
||||||
|
BACKUP_FOLDER=${BACKUP_FOLDER%/}
|
||||||
|
# Checks and gives up here if fails.
|
||||||
|
if [ -z "$BACKUP_FOLDER" -o ! -d "$BACKUP_FOLDER" -o ! -w "$BACKUP_FOLDER" ]
|
||||||
|
then echo -e "$MSG_BADFOLDER $BACKUP_FOLDER" >&2; exit 1; fi
|
||||||
|
|
||||||
|
# Gets the configuration (if any).
|
||||||
|
BACKUP_CONF="$BACKUP_FOLDER/.$("$BASENAME" "$0").conf"
|
||||||
|
if [ -r "$BACKUP_CONF" ]; then . "$BACKUP_CONF"
|
||||||
|
else
|
||||||
|
# Warns about failure.
|
||||||
|
echo -e "$MSG_NOCONF $BACKUP_CONF"
|
||||||
|
# When on dry run tries to write a new file with some help text and defaults.
|
||||||
|
if [ -z "$DOIT" -a -z "$CLASSES_PATTERN" ]; then
|
||||||
|
cat > "$BACKUP_CONF" 2>/dev/null << EOF
|
||||||
|
# This is a shell script excerpt for configuration purposes only.
|
||||||
|
# Handle with care! Please don't put code here, only variables.
|
||||||
|
# The configurable parameters for $("$BASENAME" "$0") script and their defaults are:
|
||||||
|
|
||||||
|
# CLASSES_PATTERN="" # see below
|
||||||
|
# DOIT="" # if empty the script makes a dry run
|
||||||
|
# RETAIN_DAYS=7 # retains all files created within that many days
|
||||||
|
# RETAIN_WEEKS=4 # retains one file per week/month,
|
||||||
|
# RETAIN_MONTHS=12 # created within that many weeks/months
|
||||||
|
|
||||||
|
# If you specify a CLASSES_PATTERN the script will classify the files in folder
|
||||||
|
# and rotates the files class by class independently. A pattern is a regexp:
|
||||||
|
# * the script considers only the filenames matching the whole regexp;
|
||||||
|
# * the regexp must contain parts in capturing parentheses (classifiers).
|
||||||
|
# A class is a set of filenames where the part matching all the classifiers
|
||||||
|
# is the same. For example, if CLASSES_PATTERN='^(.*)-[0-9].tgz'
|
||||||
|
# then "alpha-1.tgz alpha-2.tgz ... alpha-9.tgz" are members of a class;
|
||||||
|
# "beta-1.tgz beta-2.tgz ... beta-9.tgz" are members of another class.
|
||||||
|
# "beta-10.tgz gamma-1.log" won't be processed beacuse they don't match
|
||||||
|
# the pattern at all.
|
||||||
|
# In this example the "alpha" and "beta" files will be rotated independently.
|
||||||
|
#
|
||||||
|
# The rotating rules are:
|
||||||
|
# * all files created within RETAIN_DAYS will be retained.
|
||||||
|
# * furthermore from files created within RETAIN_WEEKS, only one file
|
||||||
|
# (the oldest) will be retained for every 7 days period.
|
||||||
|
# * furthermore from files created within RETAIN_MONTHS, only one file
|
||||||
|
# (the oldest) will be retained for every 30 days period.
|
||||||
|
#
|
||||||
|
# On dry run the script lists all the files of the class with following
|
||||||
|
# abbreviations:
|
||||||
|
# DR filename - would be retained by daily rule
|
||||||
|
# WR filename - would be retained by weekly rule
|
||||||
|
# WX filename - would be deleted by weekly rule
|
||||||
|
# MR filename - would be retained by monthly rule
|
||||||
|
# MX filename - would be deleted by monthly rule
|
||||||
|
# AX filename - would be deleted, no rule matches it because it is too old
|
||||||
|
EOF
|
||||||
|
# Reports the success or failure and stops here.
|
||||||
|
if [ -r "$BACKUP_CONF" ];
|
||||||
|
then echo -e "$MSG_CREATED" >&2; exit
|
||||||
|
else echo -e "$MSG_FAILCREATE" >&2; exit 1; fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# Configuration file has been handled.
|
||||||
|
|
||||||
|
# Initialisations which are protected from configuration.
|
||||||
|
(( SECS_DAY = 60*60*24 ))
|
||||||
|
(( SECS_WEEK = 7*SECS_DAY ))
|
||||||
|
(( SECS_MONTH = 30*SECS_DAY ))
|
||||||
|
TIMESTAMP=$("$DATE" '+%s')
|
||||||
|
|
||||||
|
# This function rotates the files matching to its parameter
|
||||||
|
# which is a definite regexp (without parenthesised parts).
|
||||||
|
function rotate_class {
|
||||||
|
|
||||||
|
local CLASSES_PATTERN="$1"; shift
|
||||||
|
local files
|
||||||
|
# Selection of files to rotate.
|
||||||
|
#
|
||||||
|
# We consider only the files matching to the pattern.
|
||||||
|
# If the pattern is empty, we'll consider all files.
|
||||||
|
if [ -z "$CLASSES_PATTERN" ]; then
|
||||||
|
# All non-hidden files but no subfolders, symlinks, etc.
|
||||||
|
files=$(cd "$BACKUP_FOLDER"; \
|
||||||
|
ls -1 -t --file-type | "$XARGS" -0 | "$EGREP" -v '[/=>@|$]$' )
|
||||||
|
else
|
||||||
|
# Non-hidden files (but no subfolders, symlinks, etc.) matching to the pattern.
|
||||||
|
files=$(cd "$BACKUP_FOLDER"; \
|
||||||
|
ls -1 -t --file-type | "$XARGS" -0 | "$EGREP" "$CLASSES_PATTERN" )
|
||||||
|
fi
|
||||||
|
# Lack of files gives it up here.
|
||||||
|
[[ -z "$files" ]] && return
|
||||||
|
# Converts the list into an array.
|
||||||
|
local class_files=($files)
|
||||||
|
# We need to process the files listed within the class_files array.
|
||||||
|
# The list is ordered by modification time, reverse.
|
||||||
|
# We'll start with the youngest and step toward the oldest.
|
||||||
|
|
||||||
|
# Collects the list of files to delete within this class.
|
||||||
|
#
|
||||||
|
local delete_files="" # list of filenames to delete
|
||||||
|
local pointer=0 # class_files index to process
|
||||||
|
local file_mtime
|
||||||
|
local file_toretain
|
||||||
|
local threshold
|
||||||
|
# Starts with the daily schedule.
|
||||||
|
# We'll retain all files within this schedule.
|
||||||
|
[[ -z "$DOIT" ]] && echo -e "$MSG_SCHEDULE"
|
||||||
|
local last_retained=""
|
||||||
|
for day in $("$SEQ" 1 "$RETAIN_DAYS")
|
||||||
|
do
|
||||||
|
# Finishes if we've no more files.
|
||||||
|
[[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
(( threshold = TIMESTAMP - (day * SECS_DAY) ))
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
# We'll retain all files of this day.
|
||||||
|
while [[ $file_mtime -ge $threshold ]]
|
||||||
|
do
|
||||||
|
[[ -z "$DOIT" ]] && echo "DR ${class_files[$pointer]}"
|
||||||
|
last_retained="$file_mtime"
|
||||||
|
# Next file; finishes if we've no more files.
|
||||||
|
(( pointer++ )); [[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
done
|
||||||
|
# This day concluded.
|
||||||
|
done
|
||||||
|
# The daily schedule concluded.
|
||||||
|
# If we didn't save any file within this schedule we'll retain this file.
|
||||||
|
if [[ -z "$last_retained" && $pointer -lt ${#class_files[@]} ]]; then
|
||||||
|
last_retained="$file_mtime"
|
||||||
|
[[ -z "$DOIT" ]] && echo "DR ${class_files[$pointer]}"
|
||||||
|
(( pointer++ ))
|
||||||
|
[[ $pointer -lt ${#class_files[@]} ]] \
|
||||||
|
&& file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# The weekly schedule.
|
||||||
|
# We'll retain only the oldest file from a week within this schedule.
|
||||||
|
last_retained=""
|
||||||
|
for week in $("$SEQ" 1 "$RETAIN_WEEKS")
|
||||||
|
do
|
||||||
|
file_toretain=""
|
||||||
|
# Finishes if we've no more files.
|
||||||
|
[[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
(( threshold = TIMESTAMP - (week * SECS_WEEK) ))
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
while [[ $file_mtime -ge $threshold ]]
|
||||||
|
do
|
||||||
|
if [ -z "$file_toretain" ]; then
|
||||||
|
# This is the first file from this week.
|
||||||
|
# Marks it to retain temporarily.
|
||||||
|
file_toretain="${class_files[$pointer]}"
|
||||||
|
else
|
||||||
|
# This is an older file from this week than the previous.
|
||||||
|
# Changes the marker, the previous file should be deleted.
|
||||||
|
delete_files+="$file_toretain\n"
|
||||||
|
[[ -z "$DOIT" ]] && echo "WX $file_toretain"
|
||||||
|
file_toretain="${class_files[$pointer]}"
|
||||||
|
fi
|
||||||
|
# Next file; finishes if we've no more files.
|
||||||
|
(( pointer++ )); [[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
done
|
||||||
|
# The marked file from the week passed has been retained.
|
||||||
|
if [ -n "$file_toretain" ]; then
|
||||||
|
last_retained=$file_mtime # a cheat but it isn't important here
|
||||||
|
[[ -z "$DOIT" ]] && echo "WR $file_toretain"
|
||||||
|
fi
|
||||||
|
# This week concluded.
|
||||||
|
done
|
||||||
|
# The weekly schedule concluded.
|
||||||
|
# If we didn't save any file within this schedule we'll retain this file.
|
||||||
|
if [[ -z "$last_retained" && $pointer -lt ${#class_files[@]} ]]; then
|
||||||
|
last_retained="$file_mtime"
|
||||||
|
[[ -z "$DOIT" ]] && echo "WR ${class_files[$pointer]}"
|
||||||
|
(( pointer++ ))
|
||||||
|
[[ $pointer -lt ${#class_files[@]} ]] \
|
||||||
|
&& file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# The monthly schedule.
|
||||||
|
# We'll retain only the oldest file from a month within this schedule.
|
||||||
|
last_retained=""
|
||||||
|
for month in $("$SEQ" 1 "$RETAIN_MONTHS")
|
||||||
|
do
|
||||||
|
file_toretain=""
|
||||||
|
# Finishes if we've no more files.
|
||||||
|
[[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
(( threshold = TIMESTAMP - (month * SECS_MONTH) ))
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
while [[ $file_mtime -ge $threshold ]]
|
||||||
|
do
|
||||||
|
if [ -z "$file_toretain" ]; then
|
||||||
|
# This is the first file from this month.
|
||||||
|
# Marks it to retain temporarily.
|
||||||
|
file_toretain="${class_files[$pointer]}"
|
||||||
|
else
|
||||||
|
# This is an older file from this month than the previous.
|
||||||
|
# Changes the marker, the previous file should be deleted.
|
||||||
|
delete_files+="$file_toretain\n"
|
||||||
|
[[ -z "$DOIT" ]] && echo "MX $file_toretain"
|
||||||
|
file_toretain="${class_files[$pointer]}"
|
||||||
|
fi
|
||||||
|
# Next file; finishes if we've no more files.
|
||||||
|
(( pointer++ )); [[ $pointer -ge ${#class_files[@]} ]] && break
|
||||||
|
file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
done
|
||||||
|
# The marked file from the month passed has been retained.
|
||||||
|
if [ -n "$file_toretain" ]; then
|
||||||
|
last_retained=$file_mtime # a cheat but it isn't important here
|
||||||
|
[[ -z "$DOIT" ]] && echo "MR $file_toretain"
|
||||||
|
fi
|
||||||
|
# This month concluded.
|
||||||
|
done
|
||||||
|
# The monthly schedule concluded.
|
||||||
|
# If we didn't save any file within this schedule we'll retain this file.
|
||||||
|
if [[ -z "$last_retained" && $pointer -lt ${#class_files[@]} ]]; then
|
||||||
|
last_retained="$file_mtime"
|
||||||
|
[[ -z "$DOIT" ]] && echo "MR ${class_files[$pointer]}"
|
||||||
|
(( pointer++ ))
|
||||||
|
[[ $pointer -lt ${#class_files[@]} ]] \
|
||||||
|
&& file_mtime=$("$STAT" -c %Y "$BACKUP_FOLDER/${class_files[$pointer]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# All the schedules have been processed.
|
||||||
|
# The remaining files will all be deleted.
|
||||||
|
while [[ $pointer -lt ${#class_files[@]} ]]
|
||||||
|
do
|
||||||
|
delete_files+="${class_files[$pointer]}\n"
|
||||||
|
[[ -z "$DOIT" ]] && echo "AX ${class_files[$pointer]}"
|
||||||
|
(( pointer ++ ))
|
||||||
|
done
|
||||||
|
|
||||||
|
# The delete_files contains the list of files to delete according to this class.
|
||||||
|
if [ -n "$delete_files" ]; then
|
||||||
|
if [ -z "$DOIT" ]; then
|
||||||
|
# Simulated deletion.
|
||||||
|
echo -e "\n$MSG_DELDRY\n$delete_files"
|
||||||
|
else
|
||||||
|
# Actual deletion file by file.
|
||||||
|
for file in $(echo -e "$delete_files")
|
||||||
|
do [[ -n "$file" ]] && rm "$BACKUP_FOLDER/$file" #2>/dev/null
|
||||||
|
done
|
||||||
|
echo -e "\n$MSG_DELREAL\n$delete_files"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# Uniform output formatting.
|
||||||
|
[[ -z "$DOIT" ]] && echo
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# This function parses the given class pattern, recursively explores
|
||||||
|
# the classes, subclasses, sub-subclasses and so on, then calls the
|
||||||
|
# rotator function for each definite class.
|
||||||
|
function rotate_classes {
|
||||||
|
|
||||||
|
local CLASSES_PATTERN="$1"; shift
|
||||||
|
[[ -z "$CLASSES_PATTERN" ]] && return # unusable
|
||||||
|
|
||||||
|
# Tries to validate the pattern.
|
||||||
|
# Test calls simulate the later use.
|
||||||
|
if [ -n "$CLASSES_PATTERN" ]; then
|
||||||
|
echo "test" | "$EGREP" "$CLASSES_PATTERN" >/dev/null 2>&1
|
||||||
|
[[ $? -gt 1 ]] && return # unusable
|
||||||
|
fi
|
||||||
|
# Does the pattern contain unexplored classifiers?
|
||||||
|
echo "test" | "$SED" -E "s/$CLASSES_PATTERN/\1/" >/dev/null 2>&1
|
||||||
|
if [[ $? -gt 0 ]]; then
|
||||||
|
# It is a definite classifier, let's call the rotator function.
|
||||||
|
rotate_class "$CLASSES_PATTERN"
|
||||||
|
else
|
||||||
|
# Needs further exploring.
|
||||||
|
# Non-hidden files (but no subfolders, symlinks, etc.) matching to the pattern.
|
||||||
|
local files=$(cd "$BACKUP_FOLDER"; \
|
||||||
|
ls -1 -t --file-type | "$XARGS" -0 | "$EGREP" "$CLASSES_PATTERN" )
|
||||||
|
# Selects the qualifier substrings which actually have matching files.
|
||||||
|
local classes=$(echo -e "$files" | "$SED" -E "s/$CLASSES_PATTERN/\1/" | "$SORT" -u)
|
||||||
|
# Enumerates these qualifiers.
|
||||||
|
for class in $classes
|
||||||
|
do
|
||||||
|
# This is the same as the CLASSES_PATTERN but contains the definite qualifier instead of
|
||||||
|
# the parenthesised expression - e.g. one of tgz or log instead of (tgz|log)
|
||||||
|
local class_pattern=$(echo -e "$CLASSES_PATTERN" | "$SED" -E "s/\([^)]*\)/$class/") #"
|
||||||
|
# Recurses for further exploring.
|
||||||
|
rotate_classes "$class_pattern"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Rotates the classes, subclasses and so on with a recursive function call.
|
||||||
|
if [ -z "$CLASSES_PATTERN" ]; then
|
||||||
|
# All files considered within the same class.
|
||||||
|
rotate_class
|
||||||
|
else
|
||||||
|
# Tries to validate the pattern (loosely).
|
||||||
|
echo "test" | "$EGREP" "$CLASSES_PATTERN" >/dev/null 2>&1
|
||||||
|
[[ $? -gt 1 ]] && echo -e "$MSG_BADPATTERN $CLASSES_PATTERN" >&2 && exit 1
|
||||||
|
# Seems to be valid, go on!
|
||||||
|
rotate_classes "$CLASSES_PATTERN"
|
||||||
|
fi
|
||||||
|
# A final thought about the dry run.
|
||||||
|
[[ -z "$DOIT" ]] && echo -e "$MSG_TODOIT"
|
||||||
|
|
||||||
|
# That's all, Folks :).
|
1
.templates/bin/shutdown
Symbolic link
@ -0,0 +1 @@
|
|||||||
|
.launcher
|
1
.templates/bin/startup
Symbolic link
@ -0,0 +1 @@
|
|||||||
|
.launcher
|
81
.templates/bin/watchtower_redeploy
Executable file
@ -0,0 +1,81 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# https://containrrr.dev/watchtower/
|
||||||
|
# https://hub.docker.com/r/containrrr/watchtower
|
||||||
|
# https://github.com/containrrr/watchtower
|
||||||
|
#
|
||||||
|
# Email notification settings below assume the gateway is acting
|
||||||
|
# as a smarthost; Docker version v20.10+ is required.
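#
# Note: WATCHTOWER_SCHEDULE below uses watchtower's 6-field (seconds-first)
# cron format, so "0 0 1 * * *" means a daily check at 01:00:00.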
|
||||||
|
|
||||||
|
image="containrrr/watchtower:latest"
|
||||||
|
instance="watchtower"
|
||||||
|
networks=""
|
||||||
|
outfile=""
|
||||||
|
volume=""
|
||||||
|
|
||||||
|
MSG_MISSINGDEP="Fatal: missing dependency"
|
||||||
|
|
||||||
|
# Checks the dependencies.
|
||||||
|
TR=$(which tr 2>/dev/null)
|
||||||
|
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
|
||||||
|
for item in cat dirname docker hostname
|
||||||
|
do
|
||||||
|
if [ -n "$(which $item)" ]
|
||||||
|
then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
|
||||||
|
else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Stops and removes the container (if necessary).
|
||||||
|
if [ -n "$("$DOCKER" ps -q -f name=$instance)" ]
|
||||||
|
then "$DOCKER" stop "$instance"; fi
|
||||||
|
if [ -n "$("$DOCKER" ps -a -q -f name=$instance)" ]
|
||||||
|
then "$DOCKER" rm "$instance"; fi
|
||||||
|
|
||||||
|
# Checks for an upgrade.
|
||||||
|
$DOCKER pull "$image"
|
||||||
|
|
||||||
|
# Creates the container.
|
||||||
|
$DOCKER create \
|
||||||
|
-e TZ=$("$CAT" "/etc/timezone") \
|
||||||
|
-e WATCHTOWER_CLEANUP=true \
|
||||||
|
-e WATCHTOWER_DEBUG=false \
|
||||||
|
-e WATCHTOWER_INCLUDE_STOPPED=true \
|
||||||
|
-e WATCHTOWER_LABEL_ENABLE=true \
|
||||||
|
-e WATCHTOWER_MONITOR_ONLY=true \
|
||||||
|
-e WATCHTOWER_REVIVE_STOPPED=false \
|
||||||
|
-e WATCHTOWER_NO_PULL=false \
|
||||||
|
-e WATCHTOWER_SCHEDULE="0 0 1 * * *" \
|
||||||
|
-e WATCHTOWER_WARN_ON_HEAD_FAILURE="never" \
|
||||||
|
-e WATCHTOWER_NOTIFICATIONS=email \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_FROM="$USER@$(hostname -f)" \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_TO="$USER@$(hostname)" \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_SERVER="host.docker.internal" \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT=25 \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_SERVER_TLS_SKIP_VERIFY=true \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_DELAY=15 \
|
||||||
|
-e WATCHTOWER_NOTIFICATION_EMAIL_SUBJECTTAG="[Watchtower $("$HOSTNAME")]" \
|
||||||
|
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||||
|
--add-host="host.docker.internal:host-gateway" \
|
||||||
|
--restart unless-stopped \
|
||||||
|
--label "com.centurylinklabs.watchtower.enable=true" \
|
||||||
|
--name $instance $image
|
||||||
|
|
||||||
|
# Connects it to the network(s).
|
||||||
|
if [ -n "$networks" ]; then
|
||||||
|
for network in $networks
|
||||||
|
do
|
||||||
|
# Checks the network, creates it if necessary.
|
||||||
|
if [ -z "$("$DOCKER" network ls -q -f name=$network)" ]
|
||||||
|
then "$DOCKER" network create -d bridge "$network"; fi
|
||||||
|
# Then connects.
|
||||||
|
$DOCKER network connect $network $instance
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Finally launches it.
|
||||||
|
$DOCKER start $instance
|
||||||
|
if [ -n "$outfile" -a -w "$("$DIRNAME" "$outfile")" ]; then
|
||||||
|
# Sets a background process to collect the image's output.
|
||||||
|
# This process will automatically terminate when the image stops.
|
||||||
|
"$DOCKER" logs -f $instance >>"$outfile" 2>&1 &
|
||||||
|
fi
|
3
.templates/crontab
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
@reboot CRON=1 USER=$LOGNAME SLEEP_BETWEEN=60 $HOME/bin/maintenance_reboot
|
||||||
|
01 00 * * * CRON=1 USER=$LOGNAME SLEEP_BETWEEN=5 $HOME/bin/maintenance_midnight
|
||||||
|
00 04 * * * CRON=1 USER=$LOGNAME SLEEP_BETWEEN=120 $HOME/bin/maintenance_daily
|
5
.templates/nginx/.nginx/.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Ignore everything else in this directory.
|
||||||
|
*
|
||||||
|
!.gitignore
|
||||||
|
!_default.conf
|
||||||
|
!_general.conf
|
14
.templates/nginx/.nginx/_default.conf
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# A restrictive default virtualhost configuration.
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80 default_server;
|
||||||
|
server_name _;
|
||||||
|
|
||||||
|
root /var/www/html;
|
||||||
|
index index.html index.htm index.nginx-debian.html;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
default_type text/html;
|
||||||
|
return 404 "<span style='font-size: large'>$hostname</span>";
|
||||||
|
}
|
||||||
|
}
|
14
.templates/nginx/.nginx/_general.conf
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# NGINX general configuration for Docker services.
|
||||||
|
|
||||||
|
# Doesn't expose unnecessary info.
|
||||||
|
server_tokens off;
|
||||||
|
# Depends on ngx_http_security_headers_module.so
|
||||||
|
#hide_server_tokens on;
|
||||||
|
|
||||||
|
# https://amalgjose.com/2020/05/15/how-to-set-the-allowed-url-length-for-a-nginx-request-error-code-414-uri-too-large/
|
||||||
|
large_client_header_buffers 4 32k;
|
||||||
|
|
||||||
|
# https://serverfault.com/questions/602201/nginx-possible-bug-with-dashes-in-server-name
|
||||||
|
# https://trac.nginx.org/nginx/ticket/571
|
||||||
|
#server_names_hash_bucket_size 64;
|
||||||
|
|
2
.templates/nginx/conf.d/services.conf
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
# Docker services
|
||||||
|
include $PAR_SERVICEBASE/.nginx/*.conf;
|
98
.templates/nginx/nginx.conf
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
# NGINX configuration for a virtualhost proxied to a Docker service.
|
||||||
|
# Uses https://github.com/acmesh-official/acme.sh to manage SSL certificates.
|
||||||
|
|
||||||
|
# Flags the non 2xx or non 3xx (probably error) responses.
|
||||||
|
map $status $errorlog { ~^[23] 0; default 1; }
|
||||||
|
|
||||||
|
# Virtualhost's configuration follows.
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
# listen 443 ssl;
|
||||||
|
|
||||||
|
server_name_in_redirect on;
|
||||||
|
server_name $PAR_SERVERNAME;
|
||||||
|
|
||||||
|
set $server_admin webmaster@$server_name;
|
||||||
|
|
||||||
|
# access log and error log.
|
||||||
|
# Any requests getting a non 2xx or non 3xx response will go to the error log as well.
|
||||||
|
access_log $PAR_SERVICE/logs/web/access.log combined;
|
||||||
|
access_log $PAR_SERVICE/logs/web/error.log combined if=$errorlog;
|
||||||
|
|
||||||
|
# Let's Encrypt (acme.sh) support.
|
||||||
|
location /.well-known/ {
|
||||||
|
proxy_pass http://$PAR_ACMEHOST:$PAR_ACMEPORT;
|
||||||
|
error_page 500 502 503 504 @proxy_error;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Forced redirect to https.
|
||||||
|
# if ($scheme = http) {
|
||||||
|
# return 301 https://$host$request_uri;
|
||||||
|
# }
|
||||||
|
|
||||||
|
# Webapp's configuration.
|
||||||
|
charset utf-8;
|
||||||
|
location /$PAR_LOCATION {
|
||||||
|
proxy_pass http://$PAR_PROXYHOST:$PAR_PROXYPORT/$PAR_LOCATION;
|
||||||
|
error_page 500 502 503 504 @proxy_error;
|
||||||
|
|
||||||
|
client_max_body_size 1G;
|
||||||
|
keepalive_timeout 30;
|
||||||
|
proxy_read_timeout 300;
|
||||||
|
|
||||||
|
proxy_request_buffering on;
|
||||||
|
proxy_buffers 2048 16k;
|
||||||
|
proxy_buffer_size 16k;
|
||||||
|
|
||||||
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Forwarded-Server $host;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
#websockets
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "upgrade";
|
||||||
|
}
|
||||||
|
location @proxy_error {
|
||||||
|
default_type text/html;
|
||||||
|
return 500
|
||||||
|
"<span style='font-size: x-large'>Sorry something went wrong. Try again a bit later.<br>
|
||||||
|
You may report this at <a href='mailto:$server_admin'>$server_admin</a>.</span>";
|
||||||
|
}
|
||||||
|
|
||||||
|
# No static service.
|
||||||
|
# location / {
|
||||||
|
# default_type text/html;
|
||||||
|
# return 404 "<span style='font-size: x-large'>Sorry try <a href='$scheme://$server_name/$PAR_LOCATION'>$scheme://$server_name/$PAR_LOCATION</a> instead.</span>";
|
||||||
|
# }
|
||||||
|
|
||||||
|
##################################################################################
|
||||||
|
# The SSL part
|
||||||
|
# https://ssl-config.mozilla.org/
|
||||||
|
# https://community.letsencrypt.org/t/howto-a-with-all-100-s-on-ssl-labs-test-using-nginx-mainline-stable/55033
|
||||||
|
|
||||||
|
# ssl_certificate $PAR_SERVICE/configs/acme/$PAR_SERVERNAME/fullchain.cer;
|
||||||
|
# ssl_certificate_key $PAR_SERVICE/configs/acme/$PAR_SERVERNAME/$PAR_SERVERNAME.key;
|
||||||
|
|
||||||
|
# Settings to achieve 'A' grade on https://www.ssllabs.com/ssltest/
|
||||||
|
ssl_session_timeout 1440m;
|
||||||
|
ssl_session_tickets off;
|
||||||
|
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_prefer_server_ciphers off;
|
||||||
|
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA";
|
||||||
|
|
||||||
|
ssl_stapling on;
|
||||||
|
ssl_stapling_verify on;
|
||||||
|
|
||||||
|
# Read before activating: https://blog.g3rt.nl/nginx-add_header-pitfall.html
|
||||||
|
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||||
|
# add_header X-Frame-Options SAMEORIGIN;
|
||||||
|
# add_header X-Content-Type-Options nosniff;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
# That's all.
|
3
.templates/sudoers.d/user
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Permits to reload the webserver.
|
||||||
|
$PAR_USER ALL=(ALL) NOPASSWD: /usr/sbin/apachectl configtest, /usr/bin/systemctl reload apache2
|
||||||
|
$PAR_USER ALL=(ALL) NOPASSWD: /usr/sbin/nginx -t, /usr/bin/systemctl reload nginx
|
8
README.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
|
||||||
|
If you have just downloaded and unzipped the zipball archive, you have to
|
||||||
|
run the setpermissions.sh shell script to restore the symlinks and properly
|
||||||
|
set the Linux permissions on the files. This script will delete some unnecessary
|
||||||
|
content (e.g. the .gitignore files) as well. When the script has run, you can
|
||||||
|
safely delete it and the .metadata file. When you complete the entire install,
|
||||||
|
you can delete this readme as well.
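(To run the script from the unpacked folder: `bash setpermissions.sh`.)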
|
||||||
|
|
3
configs/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Ignore everything else in this directory.
|
||||||
|
*
|
||||||
|
!.gitignore
|
5
docker/.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Ignore everything else in this directory.
|
||||||
|
*
|
||||||
|
!.gitignore
|
||||||
|
!README.md
|
||||||
|
|
0
docker/README.md
Normal file
4
logs/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Ignore everything else in this directory.
|
||||||
|
*
|
||||||
|
!.gitignore
|
||||||
|
!web
|
3
logs/web/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Ignore everything in this directory except this file.
|
||||||
|
*
|
||||||
|
!.gitignore
|
79
setpermissions.sh
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Helper script to clean up, restore symlinks and set permissions
|
||||||
|
# in a just downloaded and uncompressed (zipball) instance.
|
||||||
|
#
|
||||||
|
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
|
||||||
|
# Kovács Zoltán <kovacsz@marcusconsulting.hu>
|
||||||
|
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||||
|
# 2023-09-11 v1.0
|
||||||
|
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
|
||||||
|
# 2022-11-04 v0.2
|
||||||
|
# fix: Content regexp now matches well to the special chars allowed.
|
||||||
|
# 2022-09-22 v0.1 Initial release.
|
||||||
|
|
||||||
|
# Where I'm?
|
||||||
|
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
|
||||||
|
[[ -z "$(which dirname)" ]] && exit 1
|
||||||
|
[[ -z "$(which readlink)" ]] && exit 1
|
||||||
|
#
|
||||||
|
SOURCE="$0"
|
||||||
|
while [ -h "$SOURCE" ]; do
|
||||||
|
# resolve $SOURCE until the file is no longer a symlink
|
||||||
|
SCRPATH="$( cd -P "$(dirname "$SOURCE" )" && echo "$PWD" )" #"
|
||||||
|
SOURCE="$(readlink "$SOURCE")"
|
||||||
|
# if $SOURCE was a relative symlink, we need to resolve it
|
||||||
|
# relative to the path where the symlink file was located
|
||||||
|
[[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
|
||||||
|
done; SCRPATH="$( cd -P "$(dirname "$SOURCE" )" && echo "$PWD" )" #"
|
||||||
|
# We've the absolute path of the script.
|
||||||
|
|
||||||
|
# Removing some unnecessary files.
|
||||||
|
#
|
||||||
|
if [ -n "$(which find)" ]; then
|
||||||
|
find "$SCRPATH" -name .gitignore -delete
|
||||||
|
fi
|
||||||
|
if [ -d "$SCRPATH/.git" ]; then
|
||||||
|
rm -Rf "$SCRPATH/.git" 2>/dev/null
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restoring the symlinks.
|
||||||
|
# Any file containing only a single line
|
||||||
|
# with a valid pathname inside is considered a symlink.
|
||||||
|
#
|
||||||
|
if [ -n "$(which awk)" -a -n "$(which cut)" -a -n "$(which find)" -a -n "$(which ln)" ]; then
|
||||||
|
# Files containing maximum 1 line.
|
||||||
|
IFS=$'\n' read -r -d '' -a SUSPECTS < <( (find "$SCRPATH" -type f -exec awk 'END { if (NR < 2) print FILENAME }' {} \;) && printf '\0' )
|
||||||
|
# Enumerates these files.
|
||||||
|
for suspect in "${SUSPECTS[@]}"
|
||||||
|
do
|
||||||
|
if [ -n "$suspect" -a -s "$suspect" ]; then
|
||||||
|
# Checks the content: does it seem to be a valid pathname?
|
||||||
|
# For this tricky read see https://stackoverflow.com/questions/46163678/
|
||||||
|
IFS= read -r -n 1024 -d '' content <"$suspect" || [[ $content ]]
|
||||||
|
if [[ "$content" =~ ^([[:alnum:]]|[-_/. ])+$ ]]; then
|
||||||
|
# Replaces the suspect with a symlink pointing to its content.
|
||||||
|
rm "$suspect" 2>/dev/null ; ln -s "$content" "$suspect" 2>/dev/null
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restoring the permissions.
|
||||||
|
#
|
||||||
|
# First a base setup - sets permissions to 770/660 [umask 007]
|
||||||
|
chmod -R g+rw,o-rw "$SCRPATH"
|
||||||
|
find "$SCRPATH" -type d -exec chmod 2771 {} \;
|
||||||
|
[[ -n "$(which setfacl)" ]] && setfacl -R -d -m u:$USER:rwX,g::rwX "$SCRPATH" 2>/dev/null
|
||||||
|
#
|
||||||
|
# Then we'll use the metastore DB to set the permissions individually.
|
||||||
|
#
|
||||||
|
if [ -n "$(which metastore)" -a -x "$(which metastore)" ]; then
|
||||||
|
( cd "$SCRPATH"
|
||||||
|
if [ -e ".metadata" ]; then
|
||||||
|
"$(which metastore)" -amqq 2>/dev/null
|
||||||
|
fi
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# That's all, Folks! :)
|
4
storage/backups/tarballs/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Ignore everything in this directory except these files.
|
||||||
|
*
|
||||||
|
!.gitignore
|
||||||
|
!.rotate_folder.conf
|
9
storage/backups/tarballs/.rotate_folder.conf
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
# This is a shell script excerpt for configuration purposes only.
|
||||||
|
# Handle with care! Please don't put code here, only variables.
|
||||||
|
|
||||||
|
CLASSES_PATTERN="^([^.]*)\..*\.$HOSTNAME\.(dmp|sql\.gz|tgz|log)$"
|
||||||
|
DOIT="yes" # if empty the script makes a dry run
|
||||||
|
# RETAIN_DAYS=7 # retains all files created within that many days
|
||||||
|
# RETAIN_WEEKS=4 # retains one file per week/month,
|
||||||
|
# RETAIN_MONTHS=12 # created within that many weeks/months
|
||||||
|
|
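For orientation, the pattern above classifies backup files by their leading name component; a quick, hypothetical check in bash (HOSTNAME is assumed to be server1 here):

# Hypothetical match against CLASSES_PATTERN:
HOSTNAME="server1"
CLASSES_PATTERN="^([^.]*)\..*\.$HOSTNAME\.(dmp|sql\.gz|tgz|log)$"
[[ "configs.20230618_040000.server1.tgz" =~ $CLASSES_PATTERN ]] && echo "class: ${BASH_REMATCH[1]}"
# prints: class: configs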
58
tools/.launcher
Executable file
@ -0,0 +1,58 @@
#!/bin/bash
#
# Simple launcher script to start some worker scripts within a folder
# sequentially, in alphabetical order. The workers' folder is a subfolder of
# the script's folder with the same name as the script, followed by a .d
# extension. All worker scripts must be executable and must have a .sh
# extension. Other files within the folder are simply ignored.
#
# This script is usually called via a symlink, so the workers' folder's
# name comes from the symlink's name. This folder selection may be overridden
# using an environment variable. Another environment variable may request a
# delay between the workers' launches.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-08-30 v0.1 Initial release

# Accepted environment variables and their defaults.
SLEEP_BETWEEN=${SLEEP_BETWEEN-"0"}   # Secs between launches
WORKER_DIR=${WORKER_DIR:-""}         # Workers' folder

# Messages.
MSG_MISSINGWRK="Fatal: missing workers' folder"

# Checks the components.
[[ -z "$(which basename)" ]] && exit 1
[[ -z "$(which dirname)" ]] && exit 1
[[ -z "$(which printf)" ]] && exit 1
[[ -z "$(which sleep)" ]] && exit 1
# Where am I?
SCRPATH="$( cd -P "$(dirname "$0" )" && echo "$PWD" )" #"

# Checks the workers' folder.
[[ -z "$WORKER_DIR" ]] && WORKER_DIR="$SCRPATH/$(basename "$0").d"
if [ -z "$WORKER_DIR" -o ! -d "$WORKER_DIR" ]; then
  echo "$MSG_MISSINGWRK $WORKER_DIR" >&2; exit 1
fi

# Converts the (optional) time parameter to float.
SLEEP_BETWEEN=$(printf '%.2f' "$SLEEP_BETWEEN" 2>/dev/null)

# Enumerates the workers.
WORKERS="$(cd "$WORKER_DIR"; ls -1 *.sh 2>/dev/null)"
for worker in $WORKERS ""
do
  # Safety first...
  if [ -n "$worker" -a -x "$WORKER_DIR/$worker" ]; then
    # Launches the worker then waits till it finishes.
    "$WORKER_DIR/$worker" "$@"
    # Optionally reduces the launch frequency.
    sleep ${SLEEP_BETWEEN//,/.} # decimal point needed
  fi
done

# That's all, Folks!
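A usage sketch (the entry-point name healthcheck and its worker are made up for illustration): a new launcher is just a symlink to .launcher plus a matching .d folder.

# Hypothetical: create a new "healthcheck" entry point with one worker.
cd tools
ln -s .launcher healthcheck                  # the symlink's name selects healthcheck.d/
mkdir -p healthcheck.d
printf '#!/bin/bash\necho "worker ran"\n' > healthcheck.d/100-demo.sh
chmod +x healthcheck.d/100-demo.sh
SLEEP_BETWEEN=2 ./healthcheck                # pauses 2 seconds after each worker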
47
tools/acme
Executable file
@ -0,0 +1,47 @@
#!/bin/bash
#
# https://github.com/acmesh-official/acme.sh
#
# A humble wrapper script for the acme.sh tool, which has to exist
# somewhere in PATH. Sets the tool to use this service's config
# and log files.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-09-14 v0.1 Initial release

# Messages.
#
MSG_MISSINGTOOL="Fatal: missing socket relay tool"
MSG_MISSINGWORKER="Fatal: missing worker script"

# Where am I?
#
SCRPATH="$( cd -P "$( "$(which dirname)" "$0" )" && echo "$PWD" )"
[[ -z "$SCRPATH" ]] && exit 1

# Where is the service's base?
#
SERVICE="$( cd -P "$( "$(which dirname)" "$SCRPATH" )" && echo "$PWD" )"
[[ -z "$SERVICE" ]] && exit 1

# Checks the worker components.
#
ACME="$(PATH="$SCRPATH:$PATH" which acme.sh)"
if [ -z "$ACME" -o ! -x "$ACME" ]; then
  echo -e "$MSG_MISSINGWORKER acme.sh" >&2; exit 1
fi
SOCAT="$(PATH="$SCRPATH:$PATH" which socat)"
if [ -z "$SOCAT" -o ! -x "$SOCAT" ]; then
  echo -e "$MSG_MISSINGTOOL socat" >&2; exit 1
fi

# Finally launches the worker with the original command line parameters.
#
export LE_WORKING_DIR="$SERVICE/configs/acme"
export LE_CONFIG_HOME="$SERVICE/configs/acme"
export LOG_FILE="$SERVICE/logs/web/acme.log"
"$ACME" "$@"
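The socat check is there because acme.sh's standalone listener relies on it. A possible first issuance through this wrapper, assuming the web server proxies /.well-known/ to port 8100 as in the templates (domain and port are placeholders; consult the acme.sh documentation for the exact flags your setup needs):

# Hypothetical certificate issuance via the wrapper:
tools/acme --issue -d www.example.com --standalone --httpport 8100
# Renewals are then handled by the daily maintenance job with "tools/acme --cron".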
1
tools/backup
Symbolic link
@ -0,0 +1 @@
.launcher
78
tools/backup.d/configs_backup.sh
Executable file
@ -0,0 +1,78 @@
#!/bin/bash
#
# Backs up the configuration files of this docker-composed service.
# This is a general purpose worker script; it doesn't require customization.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2021-09-03 v0.1 Initial release

# Accepted environment variables and their defaults.
PAR_BASEDIR=${PAR_BASEDIR:-""}       # Service's base folder
PAR_BACKUPDIR=${PAR_BACKUPDIR:-""}   # Folder to dump within

# Other initialisations.
BACKUPDIR="storage/backups/tarballs" # Folder to dump within
USER=${USER:-$LOGNAME}               # Fix for cron environment only
YMLFILE="docker-compose.yml"

# Messages.
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGYML="Fatal: didn't find the docker-compose.yml file"
MSG_NONWRITE="The target directory isn't writable"

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in date dirname hostname readlink tar
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]' | "$TR" '-' '_')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="$0"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"

# Searches the base folder, containing a docker-compose.yml file.
# Called from the base folder (./)?
BASE_DIR="$PAR_BASEDIR"
TEST_DIR="$SCRPATH"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools/*.d?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# On failure gives up here.
if [ -z "$BASE_DIR" -o ! -r "$BASE_DIR/$YMLFILE" ]; then
  echo "$MSG_MISSINGYML" >&2; exit 1
fi
# Sets the absolute paths.
BACKUPDIR="${PAR_BACKUPDIR:-$BASE_DIR/$BACKUPDIR}"

# The dump target folder must be writable.
[[ ! -w "$BACKUPDIR" ]] \
&& echo "$MSG_NONWRITE: $BACKUPDIR" >&2 && exit 1

# Tries the FS backup.
if [ -w "$BACKUPDIR" ]; then
  BACKUP_NAME="configs.$("$DATE" '+%Y%m%d_%H%M%S').$("$HOSTNAME")"
  ( cd "$BASE_DIR"
    "$TAR" czf "$BACKUPDIR/$BACKUP_NAME.tgz" \
      "$YMLFILE" configs docker \
      2>>"$BACKUPDIR/$BACKUP_NAME.log"
  )
fi

# That's all, Folks! :)
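A restore sketch for such a tarball (the timestamp and hostname in the file name are placeholders): the archive is created relative to the service's base folder, so it unpacks cleanly from there.

# Hypothetical restore of a configuration backup:
cd ~/services/servicename
tar xzf storage/backups/tarballs/configs.20230618_040000.myhost.tgz
# Recreates docker-compose.yml, configs/ and docker/ as they were at backup time.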
73
tools/build
Executable file
@ -0,0 +1,73 @@
#!/bin/bash
#
# Builds a dockerized service defined by a docker-compose.yml file.
# This is a general purpose launcher; it doesn't require customization.
# Actually it is a humble wrapper script for a 'docker-compose build'
# command. Always pulls the latest image(s) from the repository.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-10-03 v0.2
# fix: now pulls the images even if the service doesn't require a build.
# 2021-08-30 v0.1 Initial release

# Accepted environment variables and their defaults.
PAR_BASEDIR=${PAR_BASEDIR:-""}   # Service's base folder.

# Other initialisations.
YMLFILE="docker-compose.yml"

# Messages.
MSG_ALREADYRUN="This service is running - shut it down before build."
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGYML="Fatal: didn't find the docker-compose.yml file"

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in dirname docker-compose readlink
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]' | "$TR" '-' '_')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="$0"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"

# Searches the base folder, containing a docker-compose.yml file.
# Called from the base folder (./)?
BASE_DIR="$PAR_BASEDIR"
TEST_DIR="$SCRPATH"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools/*.d?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# On failure gives up here.
if [ -z "$BASE_DIR" -o ! -r "$BASE_DIR/$YMLFILE" ]; then
  echo "$MSG_MISSINGYML" >&2; exit 1
fi

# Doesn't attempt to build if the service is running.
if [ -n "$(cd "$BASE_DIR"; "$DOCKER_COMPOSE" ps --services --filter "status=running")" ]; then
  echo "$MSG_ALREADYRUN" >&2; exit 1
fi
# Pulls the components and builds the service (if necessary).
(cd "$BASE_DIR"; "$DOCKER_COMPOSE" rm -f; "$DOCKER_COMPOSE" pull; "$DOCKER_COMPOSE" build)

# That's all, Folks!
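A typical update round-trip with these tools might look like the sketch below (run from the service's base folder; the order matters because build refuses to run while the service is up):

# Hypothetical update sequence:
tools/shutdown    # stop the running service first
tools/build       # pull fresh images and rebuild where a build section exists
tools/startup     # start it again; the startup workers wire up the logging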
30
tools/customize_apache2.sh
Normal file
@ -0,0 +1,30 @@
#!/bin/bash
#
# Set the variables below, then run this script to generate configs/apache2.conf
# and configs/apache2_ssl.conf (the webserver configuration files for this
# service). Review them, then symlink them into the instances/.apache2 folder
# and reload the webserver to activate.

PAR_ACMEHOST="localhost"
PAR_ACMEPORT="8100"
PAR_LOCATION=""
PAR_SERVICE="$HOME/services/servicename"
PAR_PROXYHOST="localhost"
PAR_PROXYPORT="8201"
PAR_SERVERNAME="www.example.com"
PAR_LOCATION=""

# Do not change anything below.
PARAMETERS='$PAR_ACMEHOST:$PAR_ACMEPORT:$PAR_SERVICE:$PAR_PROXYHOST:$PAR_PROXYPORT'
PARAMETERS+=':$PAR_SERVERNAME:$PAR_LOCATION'
for parameter in $(echo "$PARAMETERS" | tr ":" "\n")
do export ${parameter:1}; done
cat "$PAR_SERVICE/.templates/apache2/apache2.conf" | envsubst "$PARAMETERS" \
> "$PAR_SERVICE/configs/apache2.conf"


PARAMETERS+=""
for parameter in $(echo "$PARAMETERS" | tr ":" "\n")
do export ${parameter:1}; done
cat "$PAR_SERVICE/.templates/apache2/apache2_ssl.conf" | envsubst "$PARAMETERS" \
> "$PAR_SERVICE/configs/apache2_ssl.conf"
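Note that envsubst only substitutes the variables listed in $PARAMETERS, so Apache's own $-style runtime variables in the templates survive untouched; a tiny illustration (the template line is invented):

# Hypothetical demonstration of the selective substitution:
export PAR_SERVERNAME="www.example.com"
echo 'ServerName $PAR_SERVERNAME, URI stays ${REQUEST_URI}' | envsubst '$PAR_SERVERNAME'
# -> ServerName www.example.com, URI stays ${REQUEST_URI}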
20
tools/customize_nginx.sh
Normal file
@ -0,0 +1,20 @@
#!/bin/bash
#
# Set the variables below, then run this script to generate configs/nginx.conf
# (the webserver configuration file for this service). Review it, then symlink
# it into the instances/.nginx folder and reload the webserver to activate.

PAR_ACMEHOST="localhost"
PAR_ACMEPORT="8100"
PAR_SERVICE="$HOME/services/servicename"
PAR_PROXYHOST="localhost"
PAR_PROXYPORT="8201"
PAR_SERVERNAME="www.example.com"
PAR_LOCATION=""

# Do not change anything below.
PARAMETERS='$PAR_ACMEHOST:$PAR_ACMEPORT:$PAR_SERVICE:$PAR_PROXYHOST:$PAR_PROXYPORT:$PAR_SERVERNAME:$PAR_LOCATION'
for parameter in $(echo "$PARAMETERS" | tr ":" "\n")
do export ${parameter:1}; done
cat "$PAR_SERVICE/.templates/nginx/nginx.conf" | envsubst "$PARAMETERS" \
> "$PAR_SERVICE/configs/nginx.conf"
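Activation could then look like this sketch; the instances/.nginx path mirrors the comment above, while the reload command and sudo rights are assumptions about the host:

# Hypothetical activation of the generated config:
ln -s "$HOME/services/servicename/configs/nginx.conf" "$HOME/instances/.nginx/servicename.conf"
sudo nginx -t && sudo systemctl reload nginx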
63
tools/maintenance_daily
Executable file
@ -0,0 +1,63 @@
#!/bin/bash
#
# Maintenance operations run once a day.
# This script is usually called by cron (indirectly).
#
# Uses the rotate_folder utility, which must be available on the PATH.
# Uses the acme wrapper script, which has to exist in the same folder
# as this script.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-09-14 v0.2
# add: Checks the SSL web certificate (if any), renews it if necessary.
# 2021-09-01 v0.1 Initial release

# Checks the components.
[[ -z "$(which dirname)" ]] && exit 1
[[ -z "$(which readlink)" ]] && exit 1
[[ -z "$(which xargs)" ]] && exit 1

# Where am I?
SCRPATH="$( cd -P "$( "$(which dirname)" "$0" )" && echo "$PWD" )"

# Checks the SSL web certificate, renews it if necessary.
#
# Uses the acme wrapper script located in the same directory.
ACME="$SCRPATH/acme"
ACMELOG="$($(which dirname) "$SCRPATH")/logs/web/acme.log"
if [ -n "$ACME" -a -x "$ACME" ]; then
  "$ACME" --cron >> "$ACMELOG" 2>&1
fi
# Done with the certificate.

# Daily backup operations.
#
# Launches the backup script.
[[ -x "$SCRPATH/backup" ]] && "$SCRPATH/backup"
# Done with backups.

# Rotates the backup folders.
#
# Enumerates the folders and tries to rotate their content.
BACKUPSROOT="$("$(which dirname)" "$SCRPATH")/storage/backups" #"
for folder in $(ls -1 "$BACKUPSROOT" 2>/dev/null | $(which xargs) -0 ) ""
do
  if [ -n "$folder" ]; then
    # Dereferenced absolute path.
    folder="$("$(which readlink)" -e "$BACKUPSROOT/$folder")" #"
    # Is it a folder with a prepared configuration?
    if [ -d "$folder" -a -r "$folder/.rotate_folder.conf" ]; then
      # Does the rotate job.
      if [ -x "$SCRPATH/rotate_folder" ]; then
        "$SCRPATH/rotate_folder" -f "$folder" >/dev/null
      elif [ -x "$(which rotate_folder)" ]; then
        "$(which rotate_folder)" -f "$folder" >/dev/null
      fi
    fi
  fi
done
# Done with rotating.
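These maintenance entry points are meant to be driven from the service user's crontab; one possible schedule (times are placeholders, the path assumes the default layout):

# Hypothetical crontab entries (edit with: crontab -e)
@reboot      $HOME/services/servicename/tools/maintenance_reboot
5 0 * * *    $HOME/services/servicename/tools/maintenance_midnight
25 4 * * *   $HOME/services/servicename/tools/maintenance_daily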
18
tools/maintenance_midnight
Executable file
@ -0,0 +1,18 @@
#!/bin/bash
#
# Maintenance operations at midnight.
# This script is usually called by cron (indirectly).
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-08-30 v0.1 Initial release

# Where am I?
SCRPATH="$( cd -P "$( "$(which dirname)" "$0" )" && echo "$PWD" )"

# Launches the logrotate for the service logs.

[[ -x "$SCRPATH/rotate_logs" ]] && "$SCRPATH/rotate_logs" >/dev/null 2>&1
42
tools/maintenance_reboot
Executable file
@ -0,0 +1,42 @@
#!/bin/bash
#
# Maintenance operations at reboot.
# This script is usually called by cron.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-08-30 v0.1 Initial release

# Messages.
#
MSG_MISSINGDEP="Fatal: missing dependency"

# Checks the dependencies.
#
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename cut dirname readlink
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via "$THECOMMAND" (upper case) call.

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$( "$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$( "$DIRNAME" "$SOURCE" )" && pwd )" #"
SCRFILE="$("$BASENAME" "$(test -L "$0" && "$READLINK" "$0" || echo "$0")")" #"

# Actually this job does nothing.
158
tools/rotate_logs
Executable file
@ -0,0 +1,158 @@
#!/bin/bash
#
# A simple wrapper script to rotate the service's logs. Creates a slightly
# customized logrotate configuration (if it doesn't exist yet) and calls the
# standard logrotate.
#
# Uses the copytruncate utility, which must be available on the PATH.
# Doesn't rotate logs for stopped services.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
#         Kovács Zoltán <kovacsz@marcusconsulting.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2023-06-18 v1.0
# new: forked from the "Smartfront's DOCKER_skeleton" repository.
# 2021-09-14 v0.2
# add: Rotates the web logs (if any) as well.
# 2021-09-02 v0.1 Initial release

# Accepted environment variables and their defaults.
PAR_BASEDIR=${PAR_BASEDIR:-""}   # Service's base folder
PAR_CONFDIR=${PAR_CONFDIR:-""}   # Folder containing configs
PAR_LOGDIR=${PAR_LOGDIR:-""}     # Folder containing logs

# Other initialisations.
CONFDIR="configs"                # Folder containing configs
LOGDIR="logs"                    # Folder containing logs
YMLFILE="docker-compose.yml"

# Messages.
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGYML="Fatal: didn't find the docker-compose.yml file"

# Basic environment settings.
LANG=C
LC_ALL=C
# We need the sbin directories as well.
if ! [[ "$PATH" =~ '/sbin:' ]]; then
  PATH="$PATH:/usr/local/sbin:/usr/sbin:/sbin"; fi

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in basename date dirname docker-compose logrotate readlink
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]' | "$TR" '-' '_')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done
# All dependencies are available via "$THECOMMAND" (upper case) call.

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$( "$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$( "$DIRNAME" "$SOURCE" )" && pwd )" #"
SCRFILE="$("$BASENAME" "$(test -L "$0" && "$READLINK" "$0" || echo "$0")")" #"

# Searches the base folder, containing a docker-compose.yml file.
# Called from the base folder (./)?
BASE_DIR="$PAR_BASEDIR"
TEST_DIR="$SCRPATH"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools/*.d?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# On failure gives up here.
if [ -z "$BASE_DIR" -o ! -r "$BASE_DIR/$YMLFILE" ]; then
  echo "$MSG_MISSINGYML" >&2; exit 1
fi

# Sets the absolute paths.
CONFDIR="${PAR_CONFDIR:-$BASE_DIR/$CONFDIR}"
CONFFILE="$CONFDIR/.${SCRFILE%.*}.conf"
STATEFILE="$CONFDIR/.${SCRFILE%.*}.state"
LOGDIR="${PAR_LOGDIR:-$BASE_DIR/$LOGDIR}"

# Doesn't rotate logs for stopped services.
[[ -z "$(cd "$BASE_DIR"; "$DOCKER_COMPOSE" ps --services --filter "status=running")" ]] \
&& exit 0

# Locates the worker script.
WORKERSCRIPT="$SCRPATH/copytruncate"
[[ ! -x "$WORKERSCRIPT" ]] && WORKERSCRIPT="$(which copytruncate)"
[[ ! -x "$WORKERSCRIPT" ]] \
&& echo -e "$MSG_MISSINGDEP $WORKERSCRIPT." >&2 \
&& exit 1

# Creates the configuration if it doesn't exist yet.
if [ ! -e "$CONFFILE" ]; then
cat > "$CONFFILE" << EOF
$LOGDIR/*.log {
  missingok
  daily
  rotate 30
  # Must be consistent with the prerotate script's settings!
  dateext
  dateyesterday
  dateformat %Y-%m-%d.
  extension log
  compress
  # We'll use our own copytruncate script, because:
  # * we don't have permission to change ownership, so the built-in copytruncate
  #   method would fail (this is a bug in logrotate, I think);
  # * we don't have permission to reload the service, so the "create new log" method
  #   doesn't work - the service would still write to the already rotated file.
  # The custom script:
  # * copytruncates files having (by default) a .log extension and compresses the copy
  #   (and returns with exit code 1, so logrotate will skip this file);
  # * does nothing with files having any other extension - e.g. .1, .2 and so on
  #   (and returns with exit code 0, so logrotate can process this file).
  prerotate
    $WORKERSCRIPT \$1
  endscript
  # Only if the prerotate script didn't process it yet.
  copytruncate
}

$LOGDIR/web/*.log {
  missingok
  daily
  rotate 60
  # Must be consistent with the prerotate script's settings!
  dateext
  dateyesterday
  dateformat %Y-%m-%d.
  extension log
  compress
  # We'll use our own copytruncate script, because:
  # * we don't have permission to change ownership, so the built-in copytruncate
  #   method would fail (this is a bug in logrotate, I think);
  # * we don't have permission to reload the service, so the "create new log" method
  #   doesn't work - the service would still write to the already rotated file.
  # The custom script:
  # * copytruncates files having (by default) a .log extension and compresses the copy
  #   (and returns with exit code 1, so logrotate will skip this file);
  # * does nothing with files having any other extension - e.g. .1, .2 and so on
  #   (and returns with exit code 0, so logrotate can process this file).
  prerotate
    $WORKERSCRIPT \$1
  endscript
  # Only if the prerotate script didn't process it yet.
  copytruncate
}
EOF
fi

# Rotates the logs.
"$LOGROTATE" -s "$STATEFILE" "$CONFFILE"

# That's all, Folks! :)
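To preview what the generated configuration would do without rotating anything, logrotate's debug mode can be pointed at the files created above (paths assume the default configs/ layout and the rotate_logs script name):

# Hypothetical dry run of the generated logrotate setup:
cd ~/services/servicename
logrotate -d -s configs/.rotate_logs.state configs/.rotate_logs.conf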
1
tools/shutdown
Symbolic link
@ -0,0 +1 @@
.launcher
68
tools/shutdown.d/100-docker-compose.sh
Executable file
@ -0,0 +1,68 @@
#!/bin/bash
#
# Stops a dockerized service defined by a docker-compose.yml file.
# This is a general purpose stopper; it doesn't require customization.
# Actually it is a humble wrapper script for a 'docker-compose down'
# command.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2021-09-02 v0.1 Initial release

# Accepted environment variables and their defaults.
PAR_BASEDIR=${PAR_BASEDIR:-""}   # Service's base folder.

# Other initialisations.
YMLFILE="docker-compose.yml"

# Messages.
MSG_DOESNOTRUN="This service isn't running."
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGYML="Fatal: didn't find the docker-compose.yml file"

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in dirname docker-compose readlink
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]' | "$TR" '-' '_')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="$0"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"

# Searches the base folder, containing a docker-compose.yml file.
# Called from the base folder (./)?
BASE_DIR="$PAR_BASEDIR"
TEST_DIR="$SCRPATH"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools/*.d?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# On failure gives up here.
if [ -z "$BASE_DIR" -o ! -r "$BASE_DIR/$YMLFILE" ]; then
  echo "$MSG_MISSINGYML" >&2; exit 1
fi

# Doesn't stop the service if it isn't running.
if [ -z "$(cd "$BASE_DIR"; "$DOCKER_COMPOSE" ps --services --filter "status=running")" ]; then
  echo "$MSG_DOESNOTRUN" >&2; exit 1
fi
# Stops the service.
(cd "$BASE_DIR"; "$DOCKER_COMPOSE" down)

# That's all, Folks!
19
tools/shutdown.d/110-notification-email.sh
Executable file
@ -0,0 +1,19 @@
#!/bin/bash
#
# Sends a mail message about the shutdown to the Linux user itself.
# Assumes that an appropriate mail forwarding rule has been set.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2021-09-05 v0.1 Initial release

MAIL=$(which mail)
if [ -n "$MAIL" -a -x "$MAIL" ]; then
  subject="[Maintenance] A Docker service has been stopped intentionally on $HOSTNAME"
  message="This is a message from the $0 script on $HOSTNAME.\n"
  message+="This Docker service has been stopped intentionally a few seconds ago.\n\n"
  message+="Best regards: the Maintenance Bot"
  echo -e "$message" | "$MAIL" -s "$subject" "$USER"
fi

# That's all, Folks!
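For these notifications to reach a human, the service user's local mail has to be forwarded somewhere real; one common mechanism (an assumption, it depends on the MTA in use) is a ~/.forward file:

# Hypothetical forwarding rule for the service user's local mail:
echo "admin@example.com" > ~/.forward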
1
tools/startup
Symbolic link
@ -0,0 +1 @@
.launcher
88
tools/startup.d/100-docker-compose-withlogs.sh
Executable file
@ -0,0 +1,88 @@
#!/bin/bash
#
# Launches a dockerized service defined by a docker-compose.yml file.
# This is a general purpose launcher; it doesn't require customization.
# Actually it is a humble wrapper script for a 'docker-compose up -d'
# command. Additionally sets up an aggregated logfile or per-service
# logfiles and prevents multiple launch attempts.
#
# Author: Kovács Zoltán <kovacs.zoltan@smartfront.hu>
# License: GNU/GPL v3+ (https://www.gnu.org/licenses/gpl-3.0.en.html)
# 2021-09-02 v0.1 Initial release

# Accepted environment variables and their defaults.
PAR_BASEDIR=${PAR_BASEDIR:-""}   # Service's base folder
PAR_AGGLOGS=${PAR_AGGLOGS:-""}   # Not empty for aggregated logs

# Other initialisations.
LOGDIR="logs"
LOGFILE="logs/docker-compose.log"   # For the aggregated log
YMLFILE="docker-compose.yml"

# Messages.
MSG_ALREADYRUN="This service is already running."
MSG_MISSINGDEP="Fatal: missing dependency"
MSG_MISSINGYML="Fatal: didn't find the docker-compose.yml file"

# Checks the dependencies.
TR=$(which tr 2>/dev/null)
if [ -z "$TR" ]; then echo "$MSG_MISSINGDEP tr."; exit 1 ; fi
for item in dirname docker-compose readlink
do
  if [ -n "$(which $item)" ]
  then export $(echo $item | "$TR" '[:lower:]' '[:upper:]' | "$TR" '-' '_')=$(which $item)
  else echo "$MSG_MISSINGDEP $item." >&2; exit 1; fi
done

# Where am I?
# https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
SOURCE="$0"
while [ -h "$SOURCE" ]; do
  # resolve $SOURCE until the file is no longer a symlink
  SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"
  SOURCE="$("$READLINK" "$SOURCE")"
  # if $SOURCE was a relative symlink, we need to resolve it
  # relative to the path where the symlink file was located
  [[ $SOURCE != /* ]] && SOURCE="$SCRPATH/$SOURCE"
done; SCRPATH="$( cd -P "$("$DIRNAME" "$SOURCE" )" && pwd )" #"

# Searches the base folder, containing a docker-compose.yml file.
# Called from the base folder (./)?
BASE_DIR="$PAR_BASEDIR"
TEST_DIR="$SCRPATH"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# Called from ./tools/*.d?
TEST_DIR="$("$DIRNAME" "$TEST_DIR")"
[[ -z "$BASE_DIR" ]] && [[ -r "$TEST_DIR/$YMLFILE" ]] && BASE_DIR="$TEST_DIR"
# On failure gives up here.
if [ -z "$BASE_DIR" -o ! -r "$BASE_DIR/$YMLFILE" ]; then
  echo "$MSG_MISSINGYML" >&2; exit 1
fi

# Doesn't start if it is already running.
if [ -n "$(cd "$BASE_DIR"; "$DOCKER_COMPOSE" ps --services --filter "status=running")" ]; then
  echo "$MSG_ALREADYRUN" >&2; exit 1
fi
# Starts the service.
(cd "$BASE_DIR"; "$DOCKER_COMPOSE" up -d)


# Starts the logger - this/these process(es) will terminate automatically
# when the docker-compose stops.
if [ -n "$PAR_AGGLOGS" ]; then
  # Aggregated logs.
  (cd "$BASE_DIR"; "$DOCKER_COMPOSE" logs --no-color -t -f >> "$BASE_DIR/$LOGFILE" &)
else
  # Separate logs, one for every running service.
  for service in $(cd "$BASE_DIR"; "$DOCKER_COMPOSE" ps --services) ""
  do
    if [ -n "$service" ]; then
      (cd "$BASE_DIR"; "$DOCKER_COMPOSE" logs --no-color -t -f $service >> "$BASE_DIR/$LOGDIR/$service.log" &)
    fi
  done
fi

# That's all, Folks!
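In short, the default gives one log file per compose service under logs/, while a non-empty PAR_AGGLOGS switches to a single aggregated file; both invocations sketched:

# Hypothetical startup invocations:
tools/startup                    # per-service logs: logs/<service>.log
PAR_AGGLOGS=1 tools/startup      # one aggregated file: logs/docker-compose.log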