Compare commits


24 Commits

SHA1 Message Date
81311cf1bc Merge remote-tracking branch 'gitea/master' 2025-07-15 10:27:05 +02:00
f6e70d5870 Change to latest compose, use affine container 2025-07-15 10:26:24 +02:00
6da0b17b99 add airtrail-wip, but I don't think I'll deploy it 2025-06-11 20:47:46 +02:00
94166f5ce3 Use pgvector as according to affine. be careful 2025-06-10 12:51:44 +02:00
e707fac67b update latest compose file according to affine 2025-06-10 12:40:19 +02:00
32627f8839 in 0.22.1 affine index should be set to off 2025-06-05 00:11:37 +02:00
7164535367 Add memos 2025-06-04 17:49:08 +02:00
250886a0ae Add center no nat 2025-05-22 13:53:08 +02:00
61e1204536 modify install 2025-05-21 22:04:15 +02:00
ed70639832 Without using config seems to work as well. 2025-05-21 21:53:29 +02:00
aeeb045439 add gitea runner 2025-05-21 19:07:12 +00:00
80dafbbce2 now it should work well 2025-05-19 09:56:02 +00:00
68877fda84 Change variable name peer to my_ip 2025-05-19 09:46:12 +00:00
f18e1c3b86 wg peer setup also use env sh 2025-05-19 09:44:02 +00:00
99264fdd55 For some reason gitea container needs to use non 22 port 2025-05-16 11:39:33 +02:00
1a73b30ac7 userns 2025-05-15 20:38:59 +02:00
68794a2e77 db can keep id no problem 2025-05-15 20:26:41 +02:00
0f2b96ebee Merge remote-tracking branch 'origin/master' 2025-05-15 17:49:30 +02:00
f52531eeb2 full 2025-05-15 17:49:01 +02:00
fba074bb00 should be working gitea 2025-05-15 17:48:49 +02:00
dcce27f1b7 no keep userns 2025-05-15 17:06:10 +02:00
e75e5e0e37 keep user ns 2025-05-15 16:57:36 +02:00
6ea7247613 first working version of gitea, now to add backup 2025-05-15 16:05:21 +02:00
461d6b8bb6 use variable 2025-05-15 16:05:09 +02:00
25 changed files with 1010 additions and 32 deletions


@@ -1,7 +1,7 @@
 name: affine
 services:
   affine:
-    image: ghcr.io/toeverything/affine-graphql:${AFFINE_REVISION:-stable}
+    image: ghcr.io/toeverything/affine:${AFFINE_REVISION:-stable}
     container_name: affine_server
     ports:
       - '${PORT:-3010}:3010'
@@ -21,10 +21,11 @@ services:
     environment:
       - REDIS_SERVER_HOST=redis
       - DATABASE_URL=postgresql://${DB_USERNAME}:${DB_PASSWORD}@postgres:5432/${DB_DATABASE:-affine}
+      - AFFINE_INDEXER_ENABLED=false
     restart: unless-stopped
   affine_migration:
-    image: ghcr.io/toeverything/affine-graphql:${AFFINE_REVISION:-stable}
+    image: ghcr.io/toeverything/affine:${AFFINE_REVISION:-stable}
     container_name: affine_migration_job
     volumes:
       # custom configurations
@@ -36,6 +37,7 @@ services:
     environment:
       - REDIS_SERVER_HOST=redis
       - DATABASE_URL=postgresql://${DB_USERNAME}:${DB_PASSWORD}@postgres:5432/${DB_DATABASE:-affine}
+      - AFFINE_INDEXER_ENABLED=false
     depends_on:
       postgres:
         condition: service_healthy
@@ -53,7 +55,7 @@ services:
     restart: unless-stopped
   postgres:
-    image: postgres:16
+    image: pgvector/pgvector:pg16
     container_name: affine_postgres
     volumes:
       - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
@@ -71,4 +73,4 @@ services:
       interval: 10s
       timeout: 5s
       retries: 5
-    restart: unless-stopped
+    restart: unless-stopped
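
Note on the postgres → pgvector swap above: pgvector/pgvector:pg16 is a drop-in superset of postgres:16, but the vector extension must still exist inside AFFiNE's database before the indexer can use it. A quick hedged check (container name taken from this compose file; substitute the DB_USERNAME from your .env, and note that affine is the default DB_DATABASE — AFFiNE's migration job normally runs the CREATE EXTENSION step itself):

podman exec affine_postgres psql -U <DB_USERNAME> -d affine -c 'CREATE EXTENSION IF NOT EXISTS vector;'
podman exec affine_postgres psql -U <DB_USERNAME> -d affine -c '\dx vector'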


@@ -48,25 +48,26 @@ podman create \
 podman generate systemd \
   --new \
   --name $CONTAINER_REDIS \
-  --files --restart-policy always --container-prefix=affine > /dev/null
+  --files --restart-policy always --container-prefix=$CONTAINER_PREFIX > /dev/null
 mv $CONTAINER_PREFIX-$CONTAINER_REDIS.service ./systemd-units/
 podman create \
   --name $CONTAINER_POSTGRES \
   --network $NETWORK \
-  -p $DATABASE_PORT:$DATABASE_PORT \
+  --userns keep-id \
+  -p $DATABASE_PORT:5432 \
   -e POSTGRES_USER=$DB_USERNAME \
   -e POSTGRES_PASSWORD=$DB_PASSWORD \
   -e POSTGRES_DB=$DB_DATABASE \
   -e POSTGRES_HOST_AUTH_METHOD=trust \
   -v "$DB_DATA_LOCATION:/var/lib/postgresql/data:Z" \
-  docker.io/library/postgres:16
+  docker.io/pgvector/pgvector:pg16
 podman generate systemd \
   --new \
   --name $CONTAINER_POSTGRES \
-  --files --restart-policy always --container-prefix=affine > /dev/null
+  --files --restart-policy always --container-prefix=$CONTAINER_PREFIX > /dev/null
 mv $CONTAINER_PREFIX-$CONTAINER_POSTGRES.service ./systemd-units/
 mkdir -p $USER_SYSTEMD
@@ -91,9 +92,10 @@ podman run --rm \
   --network $NETWORK \
   -e REDIS_SERVER_HOST=$REDIS_SERVER_HOST \
   -e DATABASE_URL="postgresql://$DB_USERNAME:$DB_PASSWORD@$DATABASE_HOST:$DATABASE_PORT/$DB_DATABASE" \
+  -e AFFINE_INDEXER_ENABLED=false \
   -v "$UPLOAD_LOCATION:/root/.affine/storage:Z" \
   -v "$CONFIG_LOCATION:/root/.affine/config:Z" \
-  ghcr.io/toeverything/affine-graphql:$AFFINE_REVISION \
+  ghcr.io/toeverything/affine:$AFFINE_REVISION \
   sh -c 'node ./scripts/self-host-predeploy.js'
@@ -108,14 +110,15 @@ podman create \
   -e MAILER_PORT=$SMTP_PORT \
   -e MAILER_USER=$SMTP_USERNAME \
   -e MAILER_PASSWORD=$SMTP_PASSWORD \
+  -e AFFINE_INDEXER_ENABLED=false \
   -v "$UPLOAD_LOCATION:/root/.affine/storage:Z" \
   -v "$CONFIG_LOCATION:/root/.affine/config:Z" \
-  ghcr.io/toeverything/affine-graphql:$AFFINE_REVISION
+  ghcr.io/toeverything/affine:$AFFINE_REVISION
 podman generate systemd \
   --new \
   --name $CONTAINER_SERVER \
-  --files --restart-policy always --container-prefix=affine > /dev/null
+  --files --restart-policy always --container-prefix=$CONTAINER_PREFIX > /dev/null
 mv $CONTAINER_PREFIX-$CONTAINER_SERVER.service ./systemd-units/
 sed -i "/^\[Unit\]/a After=$CONTAINER_PREFIX-$CONTAINER_POSTGRES.service $CONTAINER_PREFIX-$CONTAINER_REDIS.service\nRequires=$CONTAINER_PREFIX-$CONTAINER_POSTGRES.service $CONTAINER_PREFIX-$CONTAINER_REDIS.service" ./systemd-units/$CONTAINER_PREFIX-$CONTAINER_SERVER.service
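
For reference, the sed on the last line above injects start-up ordering into the generated server unit; it prepends two lines at the top of the [Unit] section, roughly like this (a sketch — the actual names depend on $CONTAINER_PREFIX, $CONTAINER_POSTGRES and $CONTAINER_REDIS in the affine env file, which is not part of this diff; affine, affine_postgres and affine_redis below are assumed values):

[Unit]
After=affine-affine_postgres.service affine-affine_redis.service
Requires=affine-affine_postgres.service affine-affine_redis.service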

airtrail-wip/compose.yml Normal file

@@ -0,0 +1,32 @@
services:
  db:
    container_name: airtrail_db
    image: postgres:16-alpine
    restart: always
    env_file:
      - .env
    environment:
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - db_data:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U ${DB_USERNAME} -d ${DB_DATABASE_NAME}']
      interval: 5s
      timeout: 5s
      retries: 5
  airtrail:
    container_name: airtrail
    image: johly/airtrail:latest
    restart: always
    env_file:
      - .env
    ports:
      - 3000:3000
    depends_on:
      db:
        condition: service_healthy
volumes:
  db_data:


@@ -0,0 +1,25 @@
# Your domain, e.g. https://example.com
# You might have to add :443 if you are using https through a reverse proxy
ORIGIN=http://localhost:3000
# If you need to provide multiple domains, uncomment and pass a comma-separated list to ORIGINS instead (replace ORIGIN)
# ORIGINS=http://localhost:3000,https://flights.example.com
# The database URL used by the application.
# If you are using the provided docker-compose file, you should only change the "password" part of the URL
# If you are using your own database, you should change this to the correct URL
#
DB_URL=postgres://airtrail:password@db:5432/airtrail
#                                   ∧∧
#                                   Change "db" to "localhost" if you are developing locally
# Values below this line are only for the default provided postgres database
###################################################################################
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
# When you change the DB_PASSWORD, you should also update the DB_URL accordingly
DB_PASSWORD=password
# The values below this line do not need to be changed
###################################################################################
DB_DATABASE_NAME=airtrail
DB_USERNAME=airtrail
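
# To keep the comments above honest: if DB_PASSWORD changes, the password
# embedded in DB_URL must change with it. A hypothetical example (the value
# s3cretPass1 is only an illustration):
#   DB_PASSWORD=s3cretPass1
#   DB_URL=postgres://airtrail:s3cretPass1@db:5432/airtrail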

airtrail-wip/deploy.sh Executable file

@@ -0,0 +1,121 @@
#!/bin/bash
set -eu
. ./env.sh
services=("$CONTAINER_PREFIX-$CONTAINER_SERVER.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_SERVER"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
mkdir -p "$APP_ROOT"
mkdir -p "$DB_DIR"
if ! podman network exists "$NETWORK_NAME"; then
  podman network create "$NETWORK_NAME"
fi
podman create \
  --name "$CONTAINER_DB" \
  --network "$NETWORK_NAME" \
  --userns=keep-id \
  --restart=always \
  -p "$DB_PORT:5432" \
  -e POSTGRES_USER="$DB_USER" \
  -e POSTGRES_PASSWORD="$DB_PASSWORD" \
  -e POSTGRES_DB="$DB_NAME" \
  -e POSTGRES_HOST_AUTH_METHOD=trust \
  -v "$DB_DIR:/var/lib/postgresql/data:Z" \
  docker.io/library/postgres:16-alpine
podman generate systemd \
  --new \
  --name "$CONTAINER_DB" \
  --files --restart-policy always --container-prefix="$CONTAINER_PREFIX" > /dev/null
mv "$CONTAINER_PREFIX-$CONTAINER_DB.service" "$USER_SYSTEMD"
systemctl --user daemon-reload
systemctl --user enable --now "$CONTAINER_PREFIX-$CONTAINER_DB.service"
echo "Waiting for database to be ready..."
until podman exec "$CONTAINER_DB" pg_isready -U "$DB_USER" -d "$DB_NAME"; do
  sleep 1
done
echo "Database is ready."
podman create \
  --name "$CONTAINER_SERVER" \
  --network "$NETWORK_NAME" \
  --restart=always \
  -e DB_URL="postgres://$DB_USER:$DB_PASSWORD@$DB_HOST:$DB_PORT/$DB_NAME" \
  -e ORIGIN="https://$DOMAIN" \
  -p "$APP_PORT:3000" \
  docker.io/johly/airtrail:latest
podman generate systemd \
  --new \
  --name "$CONTAINER_SERVER" \
  --files \
  --restart-policy always \
  --container-prefix="$CONTAINER_PREFIX"
sed -i "/^\[Unit\]/a After=$CONTAINER_PREFIX-$CONTAINER_DB.service\nRequires=$CONTAINER_PREFIX-$CONTAINER_DB.service" "$CONTAINER_PREFIX-$CONTAINER_SERVER.service"
mv "$CONTAINER_PREFIX-$CONTAINER_SERVER.service" "$USER_SYSTEMD"
systemctl --user daemon-reload
systemctl --user enable --now "$CONTAINER_PREFIX-$CONTAINER_SERVER.service"
sudo loginctl enable-linger "$USER"
# generate haproxy config
sudo mkdir -p "$HAPROXY_SERVICE_DIR"
echo "crt $SSL_PATH/fullchain.pem" | sudo tee "$HAPROXY_SERVICE_DIR/cert.block" > /dev/null
ACL_CFG=$(cat <<EOF
acl is_airtrail hdr(host) -i $DOMAIN
use_backend airtrail_backend if is_airtrail
EOF
)
echo "$ACL_CFG" | sudo tee -a "$HAPROXY_SERVICE_DIR/acl.block" > /dev/null
BACKEND_CFG=$(cat <<EOF
backend airtrail_backend
  mode http
  option httpchk GET /login HTTP/1.1\r\nHost:\ $DOMAIN
  option forwardfor
  option http-server-close
  server airtrailhttp 127.0.0.1:$APP_PORT alpn http/1.1 check
  # === CORS & proxy headers ===
  http-request set-header X-Forwarded-For %[src]
  http-request set-header X-Forwarded-Proto https
  http-request set-header X-Forwarded-Host %[req.hdr(Host)]
  http-request set-header X-Real-IP %[src]
  # === WebSocket support ===
  http-request set-header Connection "upgrade" if { req.hdr(Upgrade) -i websocket }
  http-request set-header Upgrade "%[req.hdr(Upgrade)]" if { req.hdr(Upgrade) -i websocket }
EOF
)
echo "$BACKEND_CFG" | sudo tee -a "$HAPROXY_SERVICE_DIR/backend.block" > /dev/null
echo "Deployment completed successfully, run haproxy config to generate the final config file."

airtrail-wip/env.sh Normal file

@@ -0,0 +1,23 @@
# port range 57xx
DOMAIN=""
APP_PORT=5730
APP_ROOT="$HOME/.local/share/airtrail"
DB_DIR="$APP_ROOT/db"
CONTAINER_PREFIX="airtrail"
CONTAINER_SERVER="airtrail_server"
CONTAINER_DB="airtrail_db"
NETWORK_NAME="airtrail_network"
DB_HOST="host.containers.internal"
DB_PORT=5731
DB_USER="airtrail"
DB_PASSWORD="airtrail"
DB_NAME="airtrail"
USER_SYSTEMD="$HOME/.config/systemd/user"
SSL_PATH="$HOME/.config/ssl/$DOMAIN"
HAPROXY_CFG_DIR="/etc/haproxy"
HAPROXY_CFG="$HAPROXY_CFG_DIR/haproxy.cfg"
HAPROXY_SERVICE_DIR="$HAPROXY_CFG_DIR/services/$DOMAIN"

airtrail-wip/uninstall.sh Executable file

@@ -0,0 +1,36 @@
#!/bin/bash
set -eu
. ./env.sh
services=("$CONTAINER_PREFIX-$CONTAINER_SERVER.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_SERVER"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
for service in "${services[@]}"; do
  systemctl --user disable --now "$service"
  rm "$USER_SYSTEMD/$service"
done
sudo rm -rf "$HAPROXY_CFG_DIR/services/$DOMAIN"

gitea/backup.sh Executable file

@@ -0,0 +1,39 @@
#!/bin/bash
# Note: uses rclone for the remote copy; rclone needs manual configuration.
export XDG_RUNTIME_DIR="/run/user/$(id -u)"
export DBUS_SESSION_BUS_ADDRESS="unix:path=$XDG_RUNTIME_DIR/bus"
DATA=""
CONFIG=""
DB=""
LOCAL_BACKUP="$HOME/.local/backup"
REMOTE=""
DB_USERNAME=""
DB_PASSWORD=""
DB_DATABASE=""
CONTAINER_DB=""
SERVICE_GITEA=""
STAGING_DIR=$(mktemp -d)
mkdir -p "$LOCAL_BACKUP"
DATE=$(date +%F-%H-%M-%S)
BACKUP_NAME="backup_$DATE.tar.gz"
systemctl --user stop "$SERVICE_GITEA"
podman exec "$CONTAINER_DB" pg_dump -U "$DB_USERNAME" -F c -d "$DB_DATABASE" > "$STAGING_DIR/db.dump"
cp -r "$DATA" "$STAGING_DIR/data"
cp -r "$CONFIG" "$STAGING_DIR/config"
cp -r "$DB" "$STAGING_DIR/db"
tar -czf "$LOCAL_BACKUP/$BACKUP_NAME" -C "$STAGING_DIR" .
# keep only the five most recent archives
ls -1t "$LOCAL_BACKUP"/backup_*.tar.gz | tail -n +6 | xargs -r rm --
/usr/bin/rclone sync "$LOCAL_BACKUP" "$REMOTE" > /dev/null
rm -rf "$STAGING_DIR"
systemctl --user start "$SERVICE_GITEA"
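
There is no matching restore script in this change; a minimal sketch of the reverse path, assuming the same filled-in variables and an archive already unpacked into $RESTORE_DIR (a hypothetical staging directory). The custom-format dump written above with pg_dump -F c pairs with pg_restore, not psql:

systemctl --user stop "$SERVICE_GITEA"
podman exec -i "$CONTAINER_DB" pg_restore -U "$DB_USERNAME" -d "$DB_DATABASE" --clean < "$RESTORE_DIR/db.dump"
cp -r "$RESTORE_DIR/data/." "$DATA"
cp -r "$RESTORE_DIR/config/." "$CONFIG"
systemctl --user start "$SERVICE_GITEA"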

gitea/compose_example.yml Normal file

@@ -0,0 +1,34 @@
version: "2"
services:
  server:
    image: docker.gitea.com/gitea:1.23.7-rootless
    restart: always
    environment:
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
    networks:
      - gitea
    volumes:
      - ./data:/var/lib/gitea
      - ./config:/etc/gitea
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:2222"
    depends_on:
      - db
  db:
    image: docker.io/library/postgres:14
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    networks:
      - gitea
    volumes:
      - ./postgres:/var/lib/postgresql/data

gitea/deploy.sh Executable file

@@ -0,0 +1,146 @@
#!/bin/bash
. ./env.sh
set -e
services=("$CONTAINER_PREFIX-$CONTAINER_GITEA.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_GITEA"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
if ! podman network exists "$NETWORK"; then
  podman network create "$NETWORK"
fi
mkdir -p "$DATA_FOLDER"
mkdir -p "$CONFIG_FOLDER"
mkdir -p "$DB_FOLDER"
mkdir -p "$USER_SYSTEMD"
podman create \
  --name $CONTAINER_DB \
  --network $NETWORK \
  --userns=keep-id \
  --restart=always \
  -p $PORT_DB:5432 \
  -e POSTGRES_USER=$DB_USER \
  -e POSTGRES_PASSWORD=$DB_PASSWORD \
  -e POSTGRES_DB=$DB_NAME \
  -e POSTGRES_HOST_AUTH_METHOD=trust \
  -v "$DB_FOLDER:/var/lib/postgresql/data:Z" \
  docker.io/library/postgres:16
podman generate systemd \
  --new \
  --name $CONTAINER_DB \
  --files --restart-policy always --container-prefix=$CONTAINER_PREFIX > /dev/null
mv $CONTAINER_PREFIX-$CONTAINER_DB.service $USER_SYSTEMD
systemctl --user daemon-reload
systemctl --user enable --now $CONTAINER_PREFIX-$CONTAINER_DB.service
echo "Wait for PostgreSQL..."
until podman exec $CONTAINER_DB pg_isready -U "$DB_USER" -d "$DB_NAME" > /dev/null 2>&1; do
  sleep 2
done
echo "PostgreSQL ready"
podman create \
  --name $CONTAINER_GITEA \
  --network $NETWORK \
  --restart=always \
  --userns=keep-id \
  -p $PORT_WEB:3000 \
  -p $PORT_SSH:2222 \
  -v $DATA_FOLDER:/var/lib/gitea \
  -v $CONFIG_FOLDER:/etc/gitea \
  -e USER_UID=1000 \
  -e USER_GID=1000 \
  -e GITEA__database__DB_TYPE=postgres \
  -e GITEA__database__HOST=$HOST_DB:$PORT_DB \
  -e GITEA__database__NAME=$DB_NAME \
  -e GITEA__database__USER=$DB_USER \
  -e GITEA__database__PASSWD=$DB_PASSWORD \
  -v /etc/timezone:/etc/timezone:ro \
  -v /etc/localtime:/etc/localtime:ro \
  docker.gitea.com/gitea:latest-rootless
podman generate systemd \
  --new \
  --name $CONTAINER_GITEA \
  --files \
  --container-prefix=$CONTAINER_PREFIX \
  --restart-policy=always
sed -i "/^\[Unit\]/a After=$CONTAINER_PREFIX-$CONTAINER_DB.service\nRequires=$CONTAINER_PREFIX-$CONTAINER_DB.service" $CONTAINER_PREFIX-$CONTAINER_GITEA.service
mv $CONTAINER_PREFIX-$CONTAINER_GITEA.service $USER_SYSTEMD
systemctl --user daemon-reload
systemctl --user enable --now $CONTAINER_PREFIX-$CONTAINER_GITEA.service
sudo loginctl enable-linger $USER
# generate haproxy blocks
sudo mkdir -p $SERVICE_DIR
echo "crt $SSL_PATH/fullchain.pem" | sudo tee $SERVICE_DIR/cert.block > /dev/null
ACL_CFG=$(cat <<EOF
acl is_gitea hdr(host) -i $DOMAIN
use_backend gitea_backend if is_gitea
EOF
)
echo "$ACL_CFG" | sudo tee $SERVICE_DIR/acl.block > /dev/null
BACKEND_CFG=$(cat <<EOF
backend gitea_backend
  mode http
  option httpchk GET /
  option forwardfor
  # Set the source IP in the X-Real-IP header
  http-request set-header X-Real-IP %[src]
  server giteahttp 127.0.0.1:$PORT_WEB alpn http/1.1 check
EOF
)
echo "$BACKEND_CFG" | sudo tee $SERVICE_DIR/backend.block > /dev/null
echo "Generate backup script"
BACKUP_FILE="gitea_backup.sh"
cp backup.sh $BACKUP_FILE
sed -i "s|^DATA=\"\"|DATA=\"$DATA_FOLDER\"|" "$BACKUP_FILE"
sed -i "s|^CONFIG=\"\"|CONFIG=\"$CONFIG_FOLDER\"|" "$BACKUP_FILE"
sed -i "s|^DB=\"\"|DB=\"$DB_FOLDER\"|" "$BACKUP_FILE"
sed -i "s|^DB_USERNAME=\"\"|DB_USERNAME=\"$DB_USER\"|" "$BACKUP_FILE"
sed -i "s|^DB_DATABASE=\"\"|DB_DATABASE=\"$DB_NAME\"|" "$BACKUP_FILE"
sed -i "s|^DB_PASSWORD=\"\"|DB_PASSWORD=\"$DB_PASSWORD\"|" "$BACKUP_FILE"
sed -i "s|^LOCAL_BACKUP=\"\$HOME/.local/backup\"|LOCAL_BACKUP=\"\$HOME/.local/backup/$CONTAINER_PREFIX\"|" "$BACKUP_FILE"
sed -i "s|^CONTAINER_DB=\"\"|CONTAINER_DB=\"$CONTAINER_DB\"|" "$BACKUP_FILE"
sed -i "s|^REMOTE=\"\"|REMOTE=\"$BACKUP_REMOTE\"|" "$BACKUP_FILE"
sed -i "s|^SERVICE_GITEA=\"\"|SERVICE_GITEA=\"${CONTAINER_PREFIX}-${CONTAINER_GITEA}.service\"|" "$BACKUP_FILE"
mv $BACKUP_FILE $GITEA_FOLDER
echo "Backup script generated at $GITEA_FOLDER/$BACKUP_FILE"
echo "Backup script will be run every day at 2:00 AM"
crontab -l | grep -v "$GITEA_FOLDER/$BACKUP_FILE" | crontab -
(crontab -l 2>/dev/null; echo "0 2 * * * $GITEA_FOLDER/$BACKUP_FILE") | crontab -
echo "Backup script added to crontab"
echo "Deploy completed, manually run haproxy to generate new config."

gitea/env.sh Normal file

@@ -0,0 +1,33 @@
EMAIL=""
CONTAINER_GITEA="gitea"
CONTAINER_DB="gitea_postgres"
CONTAINER_PREFIX="gitea"
NETWORK="gitea_net"
PORT_WEB="3000"
PORT_SSH="2222"
PORT_DB="5433"
HOST_DB="host.containers.internal"
DB_USER="gitea"
DB_PASSWORD="gitea"
DB_NAME="gitea"
GITEA_FOLDER="$HOME/.local/share/gitee"
DATA_FOLDER="$GITEA_FOLDER/data"
CONFIG_FOLDER="$GITEA_FOLDER/config"
DB_FOLDER="$GITEA_FOLDER/db"
BACKUP_REMOTE="onedrive-tianyu:Backups/gitea"
DOMAIN="codedev.jamesvillage.dev"
SSL_PATH=$HOME/.config/ssl/$DOMAIN
USER_SYSTEMD="$HOME/.config/systemd/user"
HAPROXY_CFG_DIR="/etc/haproxy"
HAPROXY_CFG="$HAPROXY_CFG_DIR/haproxy.cfg"
SERVICE_DIR="$HAPROXY_CFG_DIR/services/$DOMAIN"

gitea/uninstall.sh Executable file

@@ -0,0 +1,43 @@
#!/bin/bash
. ./env.sh
services=("$CONTAINER_PREFIX-$CONTAINER_GITEA.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_GITEA"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
for service in "${services[@]}"; do
  systemctl --user disable --now "$service"
  rm "$USER_SYSTEMD/$service"
done
sudo rm -r "$SERVICE_DIR"
crontab -l | grep -v "$GITEA_FOLDER/gitea_backup.sh" | crontab -
echo "Uninstall complete. Manually run haproxy config to rebuild config. Manually remove data directory
- $GITEA_FOLDER
- $HOME/.local/backup/$CONTAINER_PREFIX
if needed."

gitea_runner/env.sh Normal file

@@ -0,0 +1,12 @@
SERVICE_NAME="gitea_runner"
USER_SYSTEMD="$HOME/.config/systemd/user"
INSTALL_DIR="$HOME/.local/share/$SERVICE_NAME"
ADDITIONAL_LABLES=(
"linux"
"arm64"
"cloud"
)
CONTAINER_HOST="unix:////run/user/1000/podman/podman.sock"
GITEA_URL="https://gitea.example.com"
GITEA_TOKEN=" <your_gitea_token>"
RUNNER_NAME="gitea-runner"

gitea_runner/gitea_runner.service.template Normal file

@@ -0,0 +1,14 @@
[Unit]
Description=Gitea Actions runner
Documentation=https://gitea.com/gitea/act_runner

[Service]
ExecStart=
ExecReload=/bin/kill -s HUP $MAINPID
WorkingDirectory=
TimeoutSec=0
RestartSec=10
Restart=always

[Install]
WantedBy=default.target
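
After install.sh (below) fills in the two empty directives, the [Service] section should end up roughly like this — a sketch assuming the defaults from env.sh and a home directory of /home/user (substitute your own):

ExecStart=/home/user/.local/share/gitea_runner/act_runner daemon --config /home/user/.local/share/gitea_runner/config.yaml
WorkingDirectory=/home/user/.local/share/gitea_runner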

gitea_runner/install.sh Executable file

@@ -0,0 +1,88 @@
#!/bin/bash
# This script installs the gitea act_runner
. ./env.sh
set -e
ARCH=$(uname -m)
echo "Architecture $ARCH"
DOWNLOAD_VARIANT=""
ACT_RUNNER_VERSION="0.2.11"
RUNNER="$INSTALL_DIR/act_runner"
if [[ "$ARCH" == "x86_64" ]]; then
  DOWNLOAD_VARIANT="amd64"
elif [[ "$ARCH" == "aarch64" ]]; then
  DOWNLOAD_VARIANT="arm64"
else
  echo "Unsupported architecture: $ARCH"
  exit 1
fi
DOWNLOAD_URL="https://dl.gitea.com/act_runner/$ACT_RUNNER_VERSION/act_runner-$ACT_RUNNER_VERSION-linux-$DOWNLOAD_VARIANT"
mkdir -p "$INSTALL_DIR"
wget -q "$DOWNLOAD_URL" -O "$RUNNER" || {
  echo "Failed to download act_runner from $DOWNLOAD_URL"
  exit 1
}
chmod +x "$RUNNER"
echo "act_runner downloaded and made executable."
DEFAULT_LABELS=(
  "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
  "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
  "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
)
LABELS=("${DEFAULT_LABELS[@]}" "${ADDITIONAL_LABELS[@]}")
LABELS_STRING=$(IFS=','; echo "${LABELS[*]}")
$RUNNER generate-config > "$INSTALL_DIR/config.yaml"
echo "act_runner config file created at $INSTALL_DIR/config.yaml"
pushd "$INSTALL_DIR" || exit 1
$RUNNER register \
  --no-interactive \
  --config config.yaml \
  --instance "$GITEA_URL" \
  --token "$GITEA_TOKEN" \
  --name "$RUNNER_NAME" \
  --labels "$LABELS_STRING"
popd || exit 1
# Create the systemd service file from the template
SERVICE_FILE="${SERVICE_NAME}.service"
cp "$SERVICE_FILE.template" "$SERVICE_FILE"
sed -i "s|^ExecStart=.*$|ExecStart=$RUNNER daemon --config $INSTALL_DIR/config.yaml|" "$SERVICE_FILE"
sed -i "s|^WorkingDirectory=.*$|WorkingDirectory=$INSTALL_DIR|" "$SERVICE_FILE"
mv "$SERVICE_FILE" "$USER_SYSTEMD/$SERVICE_FILE"
systemctl --user daemon-reload
systemctl --user enable --now "${SERVICE_NAME}.service"
echo "Gitea act_runner installed and systemd service created."
echo "Manually add the additional labels to the config file - $INSTALL_DIR/config.yaml:"
for label in "${ADDITIONAL_LABELS[@]}"; do
  echo " - $label"
done
echo "Manually set the docker socket in the config file - $INSTALL_DIR/config.yaml:"
echo " - $CONTAINER_HOST"
echo "Then restart the service."

gitea_runner/uninstall.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
# This script uninstalls the gitea act_runner
. ./env.sh
if systemctl --user list-units --full --all | grep -q "${SERVICE_NAME}.service"; then
  systemctl --user stop "${SERVICE_NAME}.service"
fi
systemctl --user disable --now "${SERVICE_NAME}.service"
rm "$USER_SYSTEMD/${SERVICE_NAME}.service"
systemctl --user daemon-reload
echo "Uninstall complete. Manually remove data directory - $INSTALL_DIR if needed."

memos/backup.sh Executable file

@@ -0,0 +1,37 @@
#!/bin/bash
# Note: uses rclone for the remote copy; rclone needs manual configuration.
export XDG_RUNTIME_DIR="/run/user/$(id -u)"
export DBUS_SESSION_BUS_ADDRESS="unix:path=$XDG_RUNTIME_DIR/bus"
DATA=""
DB=""
LOCAL_BACKUP="$HOME/.local/backup"
REMOTE=""
DB_USERNAME=""
DB_PASSWORD=""
DB_DATABASE=""
CONTAINER_DB=""
SERVICE=""
STAGING_DIR=$(mktemp -d)
mkdir -p "$LOCAL_BACKUP"
DATE=$(date +%F-%H-%M-%S)
BACKUP_NAME="backup_$DATE.tar.gz"
systemctl --user stop "$SERVICE"
podman exec "$CONTAINER_DB" pg_dump -U "$DB_USERNAME" -F c -d "$DB_DATABASE" > "$STAGING_DIR/db.dump"
cp -r "$DATA" "$STAGING_DIR/data"
cp -r "$DB" "$STAGING_DIR/db"
tar -czf "$LOCAL_BACKUP/$BACKUP_NAME" -C "$STAGING_DIR" .
# keep only the five most recent archives
ls -1t "$LOCAL_BACKUP"/backup_*.tar.gz | tail -n +6 | xargs -r rm --
/usr/bin/rclone sync "$LOCAL_BACKUP" "$REMOTE" > /dev/null
rm -rf "$STAGING_DIR"
systemctl --user start "$SERVICE"

memos/deploy.sh Executable file

@@ -0,0 +1,144 @@
#!/bin/bash
. ./env.sh
set -e
services=("$CONTAINER_PREFIX-$CONTAINER_SERVICE.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_SERVICE"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
if ! podman network exists "$NETWORK"; then
  podman network create "$NETWORK"
fi
mkdir -p "$DATA_FOLDER"
mkdir -p "$DB_FOLDER"
mkdir -p "$USER_SYSTEMD"
podman create \
  --name $CONTAINER_DB \
  --network $NETWORK \
  --userns=keep-id \
  --restart=always \
  -p $PORT_DB:5432 \
  -e POSTGRES_USER=$DB_USER \
  -e POSTGRES_PASSWORD=$DB_PASSWORD \
  -e POSTGRES_DB=$DB_NAME \
  -e POSTGRES_HOST_AUTH_METHOD=trust \
  -v "$DB_FOLDER:/var/lib/postgresql/data:Z" \
  docker.io/library/postgres:16
podman generate systemd \
  --new \
  --name $CONTAINER_DB \
  --files --restart-policy always --container-prefix=$CONTAINER_PREFIX > /dev/null
mv $CONTAINER_PREFIX-$CONTAINER_DB.service $USER_SYSTEMD
systemctl --user daemon-reload
systemctl --user enable --now $CONTAINER_PREFIX-$CONTAINER_DB.service
echo "Wait for PostgreSQL..."
until podman exec $CONTAINER_DB pg_isready -U "$DB_USER" -d "$DB_NAME" > /dev/null 2>&1; do
  sleep 2
done
echo "PostgreSQL ready"
podman create \
  --name $CONTAINER_SERVICE \
  --network $NETWORK \
  --restart=always \
  -p $PORT_WEB:5230 \
  -v "$DATA_FOLDER:/var/opt/memos:Z" \
  -e MEMOS_DRIVER=postgres \
  -e MEMOS_DSN="postgresql://$DB_USER:$DB_PASSWORD@$HOST_DB:$PORT_DB/$DB_NAME?sslmode=disable" \
  docker.io/neosmemo/memos:stable
podman generate systemd \
  --new \
  --name $CONTAINER_SERVICE \
  --files \
  --container-prefix=$CONTAINER_PREFIX \
  --restart-policy=always
sed -i "/^\[Unit\]/a After=$CONTAINER_PREFIX-$CONTAINER_DB.service\nRequires=$CONTAINER_PREFIX-$CONTAINER_DB.service" $CONTAINER_PREFIX-$CONTAINER_SERVICE.service
mv $CONTAINER_PREFIX-$CONTAINER_SERVICE.service $USER_SYSTEMD
systemctl --user daemon-reload
systemctl --user enable --now $CONTAINER_PREFIX-$CONTAINER_SERVICE.service
sudo loginctl enable-linger $USER
# generate haproxy blocks
sudo mkdir -p $SERVICE_DIR
echo "crt $SSL_PATH/fullchain.pem" | sudo tee $SERVICE_DIR/cert.block > /dev/null
ACL_CFG=$(cat <<EOF
acl is_memos hdr(host) -i $DOMAIN
use_backend memos_backend if is_memos
EOF
)
echo "$ACL_CFG" | sudo tee $SERVICE_DIR/acl.block > /dev/null
BACKEND_CFG=$(cat <<EOF
backend memos_backend
  mode http
  option httpchk GET /
  option forwardfor
  option http-server-close
  server memoshttp 127.0.0.1:$PORT_WEB alpn http/1.1 check
  # === CORS & proxy headers ===
  http-request set-header X-Forwarded-For %[src]
  http-request set-header X-Forwarded-Proto https
  http-request set-header X-Forwarded-Host %[req.hdr(Host)]
  http-request set-header X-Real-IP %[src]
  # === WebSocket support ===
  http-request set-header Connection "upgrade" if { req.hdr(Upgrade) -i websocket }
  http-request set-header Upgrade "%[req.hdr(Upgrade)]" if { req.hdr(Upgrade) -i websocket }
EOF
)
echo "$BACKEND_CFG" | sudo tee $SERVICE_DIR/backend.block > /dev/null
echo "Generate backup script"
BACKUP_FILE="memos_backup.sh"
cp backup.sh $BACKUP_FILE
sed -i "s|^DATA=\"\"|DATA=\"$DATA_FOLDER\"|" "$BACKUP_FILE"
sed -i "s|^DB=\"\"|DB=\"$DB_FOLDER\"|" "$BACKUP_FILE"
sed -i "s|^DB_USERNAME=\"\"|DB_USERNAME=\"$DB_USER\"|" "$BACKUP_FILE"
sed -i "s|^DB_DATABASE=\"\"|DB_DATABASE=\"$DB_NAME\"|" "$BACKUP_FILE"
sed -i "s|^DB_PASSWORD=\"\"|DB_PASSWORD=\"$DB_PASSWORD\"|" "$BACKUP_FILE"
sed -i "s|^LOCAL_BACKUP=\"\$HOME/.local/backup\"|LOCAL_BACKUP=\"\$HOME/.local/backup/$CONTAINER_PREFIX\"|" "$BACKUP_FILE"
sed -i "s|^CONTAINER_DB=\"\"|CONTAINER_DB=\"$CONTAINER_DB\"|" "$BACKUP_FILE"
sed -i "s|^REMOTE=\"\"|REMOTE=\"$BACKUP_REMOTE\"|" "$BACKUP_FILE"
sed -i "s|^SERVICE=\"\"|SERVICE=\"${CONTAINER_PREFIX}-${CONTAINER_SERVICE}.service\"|" "$BACKUP_FILE"
mv $BACKUP_FILE $APP_DIR
echo "Backup script generated at $APP_DIR/$BACKUP_FILE"
echo "Backup script will be run every day at 2:00 AM"
crontab -l | grep -v "$APP_DIR/$BACKUP_FILE" | crontab -
(crontab -l 2>/dev/null; echo "0 2 * * * $APP_DIR/$BACKUP_FILE") | crontab -
echo "Backup script added to crontab"
echo "Deploy completed, manually run haproxy to generate new config."

memos/env.sh Normal file

@@ -0,0 +1,21 @@
CONTAINER_PREFIX="memos"
CONTAINER_SERVICE="memos"
CONTAINER_DB="memos-db"
NETWORK="memos-net"
APP_DIR="$HOME/.local/share/memos"
DATA_FOLDER=$APP_DIR/data
DB_FOLDER=$APP_DIR/db
USER_SYSTEMD="$HOME/.config/systemd/user"
HOST_DB="host.containers.internal"
PORT_DB=5632
PORT_WEB=5630
DB_USER="memos"
DB_PASSWORD="memos"
DB_NAME="memos"
DOMAIN=""
SSL_PATH=$HOME/.config/ssl/$DOMAIN
HAPROXY_CFG_DIR="/etc/haproxy"
HAPROXY_CFG="$HAPROXY_CFG_DIR/haproxy.cfg"
SERVICE_DIR="$HAPROXY_CFG_DIR/services/$DOMAIN"
BACKUP_REMOTE="onedrive-tianyu:Backups/memos"

memos/uninstall.sh Executable file

@@ -0,0 +1,43 @@
#!/bin/bash
. ./env.sh
services=("$CONTAINER_PREFIX-$CONTAINER_SERVICE.service"
  "$CONTAINER_PREFIX-$CONTAINER_DB.service"
)
for service in "${services[@]}"; do
  if systemctl --user list-units --full --all | grep -q "$service"; then
    echo "Stopping $service..."
    systemctl --user stop "$service"
    echo "$service stopped."
  fi
done
containers=(
  "$CONTAINER_SERVICE"
  "$CONTAINER_DB"
)
for container in "${containers[@]}"; do
  if podman container exists "$container"; then
    echo "Stop and delete existing container $container"
    if podman inspect -f '{{.State.Running}}' "$container" | grep -q true; then
      podman stop "$container"
    fi
    podman rm "$container"
  fi
done
for service in "${services[@]}"; do
  systemctl --user disable --now "$service"
  rm "$USER_SYSTEMD/$service"
done
sudo rm -r "$SERVICE_DIR"
crontab -l | grep -v "$APP_DIR/memos_backup.sh" | crontab -
echo "Uninstall complete. Manually run haproxy config to rebuild config. Manually remove data directory
- $APP_DIR
- $HOME/.local/backup/$CONTAINER_PREFIX
if needed."


@@ -1,17 +1,18 @@
 #!/bin/bash
 WG_NUM=$1
+. ./env_center.sh
 WG_CONF="/etc/wireguard"
 PRIV_KEY_FILE="wg${WG_NUM}_privatekey"
 PUB_KEY_FILE="wg${WG_NUM}_publickey"
 CONF_FILE="wg${WG_NUM}.conf"
-if [ -z "$1" ]; then
+if [ -z "$WG_NUM" ]; then
   echo "Wireguard interface number not provided."
   exit 1
 fi
-if ! [[ "$1" =~ ^-?[0-9]+([.][0-9]+)?$ ]]; then
+if ! [[ "$WG_NUM" =~ ^-?[0-9]+([.][0-9]+)?$ ]]; then
   echo "Wireguard interface number has to be a number."
   exit 2
 fi
@@ -24,16 +25,10 @@ sudo mkdir -p $WG_CONF
 sudo mv ./$PRIV_KEY_FILE $WG_CONF
 sudo mv ./$PUB_KEY_FILE $WG_CONF
-read -p "Enter IP (as server) (e.g. 192.168.${WG_NUM}.1/24): " ADDRESS
-read -p "Enter wireguard subnet, should be the subnet of server IP: " WG_SUBNET
-read -p "Enter physical interface for nat out: " PHY
-read -p "Enter port to listern (e.g. 51820): " LISTEN_PORT
 CONF_CONTENT="[Interface]
 PrivateKey = $PRIVATE_KEY
 SaveConfig = false
-Address = $ADDRESS
+Address = $WG_IP
 ListenPort = $LISTEN_PORT
 PostUp = iptables -A FORWARD -i wg$WG_NUM -j ACCEPT; iptables -t nat -A POSTROUTING -s $WG_SUBNET -o $PHY -j MASQUERADE
 PostDown = iptables -D FORWARD -i wg$WG_NUM -j ACCEPT; iptables -t nat -D POSTROUTING -s $WG_SUBNET -o $PHY -j MASQUERADE


@@ -0,0 +1,41 @@
#!/bin/bash
. ./env_center.sh
WG_CONF="/etc/wireguard"
PRIV_KEY_FILE="wg${WG_NUM}_privatekey"
PUB_KEY_FILE="wg${WG_NUM}_publickey"
CONF_FILE="wg${WG_NUM}.conf"
if [ -z "$WG_NUM" ]; then
echo "Wireguard interface number not provided."
exit 1
fi
if ! [[ "$WG_NUM" =~ ^-?[0-9]+([.][0-9]+)?$ ]]; then
echo "Wireguard interface number has to be a number."
exit 2
fi
wg genkey | tee wg"$WG_NUM"_privatekey | wg pubkey > wg"$WG_NUM"_publickey
PRIVATE_KEY=$(cat $PRIV_KEY_FILE)
sudo mkdir -p $WG_CONF
sudo mv ./$PRIV_KEY_FILE $WG_CONF
sudo mv ./$PUB_KEY_FILE $WG_CONF
CONF_CONTENT="[Interface]
PrivateKey = $PRIVATE_KEY
SaveConfig = false
Address = $WG_IP
ListenPort = $LISTEN_PORT"
echo "$CONF_CONTENT" > "$CONF_FILE"
sudo mv "$CONF_FILE" "$WG_CONF"
echo "Config saved to: $WG_CONF/$CONF_FILE"
sudo systemctl enable wg-quick@"wg$WG_NUM"
sudo systemctl start wg-quick@"wg$WG_NUM"
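
With the values in env_center.sh below, the script above should render /etc/wireguard/wg1.conf roughly as follows (a sketch; the private key is whatever wg genkey produced):

[Interface]
PrivateKey = <generated private key>
SaveConfig = false
Address = 192.168.2.1/24
ListenPort = 51821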

wireguard/env_center.sh Normal file

@@ -0,0 +1,9 @@
WG_NUM="1"
WG_IP="192.168.2.1/24" # IP of the server e.g. 192.168.2.1/24
WG_SUBNET="192.168.2.0/24" # Subnet of the server IP
PHY="eth0" # Physical interface for NAT out
LISTEN_PORT="51821" # Port to listen (e.g. 51820)
DNS_SERVER="8.8.8.8"
ALLOWED_IPS="$WG_SUBNET"

wireguard/env_peer.sh Normal file

@@ -0,0 +1,8 @@
WG_NUM="0"
MY_IP=""
DNS_SERVER=""
PEER_PUBLIC_KEY=""
PEER_ENDPOINT=""
ALLOWED_IPS=""


@@ -1,17 +1,18 @@
 #!/bin/bash
 WG_NUM=$1
+. ./env_peer.sh
 WG_CONF="/etc/wireguard"
 PRIV_KEY_FILE="wg${WG_NUM}_privatekey"
 PUB_KEY_FILE="wg${WG_NUM}_publickey"
 CONF_FILE="wg${WG_NUM}.conf"
-if [ -z "$1" ]; then
+if [ -z "$WG_NUM" ]; then
   echo "Wireguard interface number not provided."
   exit 1
 fi
-if ! [[ "$1" =~ ^-?[0-9]+([.][0-9]+)?$ ]]; then
+if ! [[ "$WG_NUM" =~ ^-?[0-9]+([.][0-9]+)?$ ]]; then
   echo "Wireguard interface number has to be a number."
   exit 2
 fi
@@ -24,16 +25,29 @@ sudo mkdir -p $WG_CONF
 sudo mv ./$PRIV_KEY_FILE $WG_CONF
 sudo mv ./$PUB_KEY_FILE $WG_CONF
-read -p "Enter IP (as peer) (e.g. 192.168.180.2/24): " ADDRESS
-read -p "Enter DNS server (e.g. 8.8.8.8): " DNS
-CONF_CONTENT="[Interface]
+CONF_INTERFACE="[Interface]
 PrivateKey = $PRIVATE_KEY
-Address = $ADDRESS
-DNS = $DNS
+Address = $MY_IP/32
+DNS = $DNS_SERVER
 "
+echo "$CONF_INTERFACE" > "$CONF_FILE"
+CONF_PEER="[Peer]
+PublicKey = $PEER_PUBLIC_KEY
+AllowedIPs = $ALLOWED_IPS
+Endpoint = $PEER_ENDPOINT
+PersistentKeepalive = 25
+"
+echo "$CONF_PEER" >> "$CONF_FILE"
-echo "$CONF_CONTENT" > "$CONF_FILE"
 sudo mv "$CONF_FILE" "$WG_CONF"
-echo "Config saved to: $WG_CONF/$CONF_FILE"
+echo "Config saved to: $WG_CONF/$CONF_FILE"
+echo "Add the following to the server config:"
+echo "[Peer]
+PublicKey = $(sudo cat $WG_CONF/$PUB_KEY_FILE)
+AllowedIPs = $MY_IP/32
+"
 sudo systemctl enable wg-quick@"wg$WG_NUM"
 sudo systemctl start wg-quick@"wg$WG_NUM"
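
With env_peer.sh filled in, the rendered peer config follows directly from the two blocks above; a sketch with hypothetical values, since env_peer.sh ships empty (MY_IP=192.168.2.2, DNS_SERVER=8.8.8.8, ALLOWED_IPS=192.168.2.0/24, PEER_ENDPOINT=vpn.example.com:51821):

[Interface]
PrivateKey = <generated private key>
Address = 192.168.2.2/32
DNS = 8.8.8.8

[Peer]
PublicKey = <server public key>
AllowedIPs = 192.168.2.0/24
Endpoint = vpn.example.com:51821
PersistentKeepalive = 25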