Compare commits

...

12 Commits

12 changed files with 213 additions and 69 deletions

View File

@@ -1,45 +0,0 @@
# GitHub Actions workflow: pack the app contents into a tarball and
# deploy it to a CapRover instance.
# Pushes to "dev" deploy to the development app; pushes to "main"
# deploy to production.
name: "Caprover: Build & Deploy (backup-databases)"
on:
  push:
    branches: [ "main", "dev" ]
  pull_request:
    branches: [ "main", "dev" ]
#env:
#  API_URL: ${{ github.ref_name == 'main' && vars.API_URL && vars.API_URL || vars.DEV_API_URL }}
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Switch to branch ${{ github.ref_name }}
        uses: actions/checkout@v4
      # Bundle only the files CapRover needs into a single archive.
      - name: Pack app contents
        run: |
          tar -czv \
            -f deploy.tar \
            ./captain-definition \
            ./Dockerfile \
            ./scripts
      # Development deploy: each DEV_* variable falls back to the
      # production value when unset.
      - name: Deploy webapp to development
        if: github.ref_name == 'dev'
        uses: caprover/deploy-from-github@v1.0.1
        with:
          server: "${{ vars.DEV_CAPROVER_SERVER || vars.CAPROVER_SERVER }}"
          app: "${{ vars.DEV_APP_NAME || vars.APP_NAME }}"
          token: "${{ secrets.DEV_APP_TOKEN }}"
      - name: Deploy webapp to production
        if: github.ref_name == 'main'
        uses: caprover/deploy-from-github@v1.0.1
        with:
          server: "${{ vars.CAPROVER_SERVER }}"
          app: "${{ vars.APP_NAME }}"
          token: "${{ secrets.APP_TOKEN }}"

View File

@@ -5,11 +5,16 @@ RUN chmod +x /usr/local/bin/*.sh
RUN mkdir -p ${BACKUP_PATH:-/var/data/backup}
ARG OFELIA_CONFIG_PATH=/etc/ofelia/config.ini
ARG DEBUG
ARG BACKUP_DATABASES
ENV BACKUP_DATABASES=${BACKUP_DATABASES}
ARG OFELIA_CONFIG_PATH=/etc/ofelia/config.ini
ENV OFELIA_CONFIG_PATH=${OFELIA_CONFIG_PATH}
ARG DEBUG
ENV DEBUG=${DEBUG}
ARG BACKUP_DATABASES_PATH=/var/data/backup-databases
ENV BACKUP_DATABASES_PATH=${BACKUP_DATABASES_PATH}
RUN ofelia-config.sh --save
ENTRYPOINT docker-entrypoint.sh

View File

@@ -25,3 +25,55 @@ mysql://db_user:123456@mydatabase.com.br 0 0 1 * * *
- *schedule*: 0 0 1 * * * (in the [Go implementation cron format](https://godoc.org/github.com/robfig/cron), meaning in this case every night at 01h00)
mysql://db_user@mydatabase.com.br/* @daily
- *schedule*: @daily (once a day, starting 24h after startup)
### Remote volumes
A persistência dos dados de backup depende obviamente da criação de um volume persistente no Docker. O ideal é que esse volume esteja fora do servidor. Um serviço compatível com s3 pode ser um bom local de armazenamento e para essa possibilidade incluímos scripts de configuração para os serviços s3 mais comuns (nesse caso, mais comuns para nós).
Para que o volume esteja disponível em todos os nós do cluster nós precisamos fazer a configuração previamente em cada servidor, instalando no docker um plugin que vai armazenar as informações de acesso ao serviço s3. O exemplo abaixo ilustra a instalação do plugin para acesso a uma conta no Cloudflare R2.
```bash
setup-cloudflare-r2.sh --account-id="<ID da conta>"\
--key="<Chave de Acesso>"\
--secret="<Chave secreta>"\
--alias="backup-bucket";
```
A configuração de um bucket no Linode Objects também está coberta por um script próprio. No futuro esses scripts serão unificados num mesmo processo, por isso evite fazer qualquer automatização confiando na existência deles. O conteúdo dos scripts é simples e pode ser facilmente adaptado para outros serviços.
A montagem do bucket é feita pela API "Service Update" do Docker, por meio da interface que o Caprover oferece. O JSON abaixo montará um bucket chamado "backup" criado no serviço s3 configurado pelo plugin com nome ou alias "backup-bucket" no caminho `/var/data/backup-databases`.
```JSON
{
"TaskTemplate": {
"ContainerSpec": {
"Mounts": [
{
"Type": "volume",
"Source": "backup",
"Target": "/var/data/backup-databases",
"VolumeOptions": {
"DriverConfig": {
"Name": "backup-bucket"
}
}
}
]
}
}
}
```
O formato YML também deveria ser válido, mas nas versões atuais do Caprover a configuração abaixo foi ignorada.
```YML
TaskTemplate:
ContainerSpec:
Mounts:
- Type: volume
Source: backup
Target: /var/data/backup-databases
VolumeOptions:
DriverConfig:
Name: backup-bucket
```

97
scripts/backup-mariadb.sh Normal file
View File

@@ -0,0 +1,97 @@
#! /bin/sh
#
# MariaDB Backup
# Dump one database, or all databases, of a given MariaDB server.
#
# Usage: backup-mariadb.sh <database-url>
#
# The only argument is a database URL, e.g.:
#   mysql://user:pass@host:3306/dbname?directory=subdir&prefix=backup-
# A DB_NAME of "*" dumps every database to its own file; an empty
# DB_NAME dumps everything into a single --all-databases file.
#
# Requires on PATH: dburl-parser.sh, mysql, mariadb-dump.
DB_URL="$1";
# Parse the URL into environment variables while checking its validity.
#
# dburl-parser.sh populates:
# - $DB_USERNAME
# - $DB_PASSWORD
# - $DB_HOST
# - $DB_PORT
# - $DB_NAME
# - any query var as a prefixed variable $db_arg_${name}=${value}
#
# mktemp gives an unpredictable, race-free temp file (the original
# hand-rolled a name from /dev/urandom).
dotenv_path="$(mktemp /tmp/ofelia-XXXXXXXX.dotenv)" || exit 1;
# Fail early if the URL cannot be parsed instead of sourcing garbage.
if ! dburl-parser.sh "${DB_URL}" > "${dotenv_path}"; then
  rm -f "${dotenv_path}";
  echo >&2 "failed initializing backup -- could not parse \"${DB_URL}\"";
  exit 1;
fi
# '.' is the POSIX form of bash's 'source' (this script runs under /bin/sh).
. "${dotenv_path}";
rm -f "${dotenv_path}";
# The backup destination must be configured by the environment.
if test -z "${BACKUP_DATABASES_PATH}"; then
  echo >&2 "failed initializing backup -- \$BACKUP_DATABASES_PATH is empty";
  exit 1;
fi
## Optional subdirectory taken from the URL's ?directory= query arg;
## collapse repeated slashes, then trim the outer pair before re-wrapping.
backup_path="/$(
  echo "/${BACKUP_DATABASES_PATH}/${db_arg_directory:-\/}/" \
  | sed 's/\/\{1,\}/\//g' \
  | sed 's/^\/\(.*\)\/$/\1/'
)/";
## Ensure the directory exists (abort on failure instead of dumping nowhere).
mkdir -p "${backup_path}" || exit 1;
## File-name parts, overridable through URL query args.
backup_prefix="${db_arg_prefix:-backup-}";
backup_extension="${db_arg_file_extension:-SQL}";
# Resolve the list of databases to dump.
if test "${DB_NAME}" = '*'; then
  ## '*' means every database on the server. Build the connection
  ## options positionally so empty values add no option at all
  ## (the original's unquoted `test -n $VAR` was always true).
  set --;
  test -n "${DB_USERNAME}" && set -- "$@" --user="${DB_USERNAME}";
  test -n "${DB_PASSWORD}" && set -- "$@" --password="${DB_PASSWORD}";
  test -n "${DB_HOST}" && set -- "$@" --host="${DB_HOST}";
  test -n "${DB_PORT}" && set -- "$@" --port="${DB_PORT}";
  ## --batch --skip-column-names prints one bare name per line
  ## (replaces the fragile --vertical + grep header stripping).
  databases=$(mysql "$@" \
    --batch --skip-column-names \
    --execute="SHOW DATABASES;"
  ) || {
    echo >&2 "failed listing databases on \"${DB_HOST}\"";
    exit 1;
  };
else
  ## Use the database informed, or a sentinel meaning --all-databases.
  databases="${DB_NAME:-::ALL_DATABASES::}";
fi
# Process each database (word-splitting of ${databases} is intentional:
# database names cannot contain whitespace).
for database_item in ${databases}; do
  ## Skip empty entries.
  test -z "${database_item}" && continue;
  ## Build the mariadb-dump argument list positionally; this replaces
  ## the original heredoc-through-xargs, which word-split every value.
  set --;
  test -n "${DB_USERNAME}" && set -- "$@" --user="${DB_USERNAME}";
  test -n "${DB_PASSWORD}" && set -- "$@" --password="${DB_PASSWORD}";
  test -n "${DB_HOST}" && set -- "$@" --host="${DB_HOST}";
  test -n "${DB_PORT}" && set -- "$@" --port="${DB_PORT}";
  if test "${database_item}" = "::ALL_DATABASES::"; then
    ## Backup everything in a single file if no database was set.
    set -- "$@" --all-databases;
    filename_database="all-databases";
  else
    ## Backup each database name to its own file.
    set -- "$@" --databases "${database_item}";
    filename_database="${database_item}";
  fi
  ## Timestamped destination file, e.g. backup-host-db_1700000000.SQL
  result_file="$(
    printf '%s/%s%s-%s_%s.%s' \
      "${backup_path}" \
      "${backup_prefix}" \
      "${DB_HOST}" \
      "${filename_database}" \
      "$(date +%s)" \
      "${backup_extension}"
  )";
  # NOTE(review): the password is still visible in `ps` output while the
  # dump runs; consider MYSQL_PWD or an option file if that matters here.
  mariadb-dump "$@" --result-file="${result_file}";
done

View File

@@ -21,9 +21,12 @@ DB_URL="$1";
# - $DB_PORT
# - $DB_NAME
# - any query var as a prefixed variable $db_arg_${name}=${value}
dburl-parser.sh ${DB_URL} > /tmp/ofelia.dotenv;
source /tmp/ofelia.dotenv;
rm /tmp/ofelia.dotenv;
dotenv_path="/tmp/ofelia-$(
tr -dc A-Za-z0-9 </dev/urandom | head -c 16; echo
).dotenv"
dburl-parser.sh ${DB_URL} > ${dotenv_path};
source ${dotenv_path};
rm ${dotenv_path};
# set backup destination
if test -z "${BACKUP_DATABASES_PATH}"; then

View File

@@ -14,6 +14,11 @@ queryless_url="${DB_URL%%\?*}";
# get URL's schema
schema="${queryless_url%//*}";
if test "${schema}" = "${queryless_url}"; then
echo >&2 "database URLs must start with schema (i.e.: mysql://)"\
&& echo >&2 "URL_INVALID_ERROR: \"${DB_URL}\""\
&& exit 2;
fi
schemaless_url=${queryless_url#*//};
# left hand is everything until the first @ (the user part)

View File

@@ -0,0 +1,11 @@
#! /bin/sh
#
# Setup and trigger ofelia
#
# config and run ofelia
ofelia-config.sh --save || exit 1;
# run ofelia
/usr/bin/ofelia daemon --config="${OFELIA_CONFIG_PATH}";

View File

@@ -0,0 +1,10 @@
#! /bin/sh
#
# MySQL install tools
# Install mysql and mysqldump to container
# - mariadb-client (mariadb tools)
# - mariadb-connector-c (fills the gap with new sha password protocol)
#
apk update && apk add mariadb-client mariadb-connector-c;

View File

@@ -3,6 +3,8 @@
#
# MySQL install tools
# Install mysql and mysqldump to container
# - mysql-client (mariadb tools)
# - mariadb-connector-c (fills the gap with new mysql sha password protocol)
#
apk update && apk add mysql-client;
apk update && apk add mysql-client mariadb-connector-c;

View File

@@ -16,7 +16,7 @@ while test $# -gt 0; do
shift;
if test "${arg_value}" = "--save"; then
output_path=${OFELIA_CONFIG_PATH:-/etc/ofelia/config.ini};
mkdir $(dirname $output_path);
mkdir -p $(dirname $output_path);
touch ${output_path};
fi
done
@@ -24,9 +24,9 @@ if test -z "${output_path}"; then
output_path=/dev/stdout;
fi
for entry in "${BACKUP_DATABASES}"; do
echo "${BACKUP_DATABASES}" | while read entry; do
# normalize spaces
backup_data=$(echo ${entry} | xargs);
backup_data=$(echo "${entry}" | xargs);
# skip if it is a empty line
if test -z "${backup_data}"; then
continue;
@@ -36,7 +36,7 @@ for entry in "${BACKUP_DATABASES}"; do
# remove everything before the first space to get cron label
BACKUP_SCHEDULE=${backup_data#* };
# load url data as variables
dburl-parser.sh ${DB_URL} > /tmp/ofelia.dotenv;
dburl-parser.sh ${DB_URL} > /tmp/ofelia.dotenv || exit 1;
source /tmp/ofelia.dotenv;
rm /tmp/ofelia.dotenv;
# fail if $DB_URL has no schema

View File

@@ -3,7 +3,7 @@
#
# Connect to a Cloudflare's R2 storage using mochoa/s3fs
#
# Run this script on the Docker HOST to bind buckets using "linode-bucket"
# Run this script on the Docker HOST to bind buckets using "cloudflare-r2"
# -- Or any alias you give with "--alias" option -- driver name.
# Please refer to https://hub.docker.com/r/mochoa/s3fs-volume-plugin.
# You may bind to different buckets using different aliases.
@@ -30,16 +30,18 @@ while test $# -ne 0; do
elif test "${key}" = "-a" || test "${key}" = "--alias"; then
PLUGIN_ALIAS="${value}";
elif test "${key}" = "-u" || test "${key}" = "--user"; then
if id "${value}" > /dev/null 2>&1; then
USER_ID="$(id -u "${value}")";
else
passwdEntry=$(getent passwd ${value} || echo ":NOT_FOUND:");
if test "${passwdEntry}" = ":NOT_FOUND:"; then
USER_ID="$(id -u)";
else
USER_ID=$(echo $passwdEntry | cut -d ':' -f 3);
fi
elif test "${key}" = "-g" || test "${key}" = "--group"; then
if id "${value}" > /dev/null 2>&1; then
GROUP_ID="$(id -g "${value}")";
else
groupEntry=$(getent group ${value} || echo ":NOT_FOUND:");
if test "${groupEntry}" = ":NOT_FOUND:"; then
GROUP_ID="$(id -g)";
else
GROUP_ID=$(echo $groupEntry | cut -d ':' -f 3);
fi
else
echo "Invalid \"${key}\" option";

View File

@@ -28,16 +28,18 @@ while test $# -ne 0; do
elif test "${key}" = "-a" || test "${key}" = "--alias"; then
PLUGIN_ALIAS="${value}";
elif test "${key}" = "-u" || test "${key}" = "--user"; then
if id "${value}" > /dev/null 2>&1; then
USER_ID="$(id -u "${value}")";
else
passwdEntry=$(getent passwd ${value} || echo ":NOT_FOUND:");
if test "${passwdEntry}" = ":NOT_FOUND:"; then
USER_ID="$(id -u)";
else
USER_ID=$(echo $passwdEntry | cut -d ':' -f 3);
fi
elif test "${key}" = "-g" || test "${key}" = "--group"; then
if id "${value}" > /dev/null 2>&1; then
GROUP_ID="$(id -g "${value}")";
else
groupEntry=$(getent group ${value} || echo ":NOT_FOUND:");
if test "${groupEntry}" = ":NOT_FOUND:"; then
GROUP_ID="$(id -g)";
else
GROUP_ID=$(echo $groupEntry | cut -d ':' -f 3);
fi
else
echo "Invalid \"${key}\" option";