Compare commits

..

3 Commits

Author | SHA1 | Message | Date
Sönke Domröse | 1a72a7c0ff | Merge remote-tracking branch 'origin/main' into feature/allthestuff (conflicts: caddy/Caddyfile, startall.sh) | 2025-04-24 15:37:10 +02:00
Sönke Domröse | 266f32b917 | readme + restart script | 2025-04-24 15:34:57 +02:00
Sönke Domröse | 2e618e910a | Add all the containers | 2025-04-24 14:55:20 +02:00
154 changed files with 764 additions and 138923 deletions

.idea/.gitignore generated vendored

@@ -11,5 +11,3 @@
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
/AndroidProjectSystem.xml
/sonarlint.xml

.idea/php.xml generated

@@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="MessDetectorOptionsConfiguration">
<option name="transferred" value="true" />
</component>
<component name="PHPCSFixerOptionsConfiguration">
<option name="transferred" value="true" />
</component>
<component name="PHPCodeSnifferOptionsConfiguration">
<option name="highlightLevel" value="WARNING" />
<option name="transferred" value="true" />
</component>
<component name="PhpStanOptionsConfiguration">
<option name="transferred" value="true" />
</component>
<component name="PsalmOptionsConfiguration">
<option name="transferred" value="true" />
</component>
</project>


@@ -1,202 +0,0 @@
auth.domr.ovh,
auth.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8444
}
bookstack.domr.ovh,
bookstack.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6875
}
bracket.domr.ovh:443,
bracket.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3000
}
backend.bracket.domr.ovh:443,
backend.bracket.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8400
}
caddy.domr.ovh,
caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8881
}
api.caddy.domr.ovh,
api.caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2019
}
changedetect.domr.ovh:80,
changedetect.home.domroese.eu:80,
changedetect.domr.ovh:443,
changedetect.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5238
}
chartbrew.domr.ovh,
chartbrew.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4018
}
chartbrew.domr.ovh:4019,
chartbrew.home.domroese.eu:4019 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4019
}
onboarding.domr.ovh,
onboarding.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8517
}convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
todos.domr.ovh, #donetick
todos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2021
}
erugo.domr.ovh,
erugo.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9997
}
excalidraw.domr.ovh,
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}
firefly.domr.ovh,
firefly.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8950
}
rss.domr.ovh,
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8884
}
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
homarr.domr.ovh,
homarr.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7575
}
homepage.domr.ovh:80,
homepage.domr.ovh:443,
homepage.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
}
ittools.domr.ovh:443,
ittools.home.domroese.eu:443,
ittools.domr.ovh:80,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}
jenkins.domr.ovh,
jenkins.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8040
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9925
}
memos.domr.ovh,
memos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5230
}
ntfy.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8234
}
chat.domr.ovh,
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}omnitools.domr.ovh,
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8579
}
paperless.domr.ovh:443,
paperless.home.domroese.eu:443,
paperless.domr.ovh:80,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}
pihole.domr.ovh,
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
shiori.domr.ovh,
shiori.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2661
}
speedtesttracker.domr.ovh,
speedtesttracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1378
}
pdf.domr.ovh,
pdf.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3614
}
uptimekuma.domr.ovh,
uptimekuma.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8030
}
vault.domr.ovh:443,
vault.home.domroese.eu:443,
vault.domr.ovh:80,
vault.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8282
}

Submodule McpDiceRoller deleted from 4fa59ed153


@@ -2,41 +2,47 @@
Docker Container
## some commands
* show only container name and status:
```
docker ps --format '{{.Names}}|{{.Status}}' | column -t -s "|"
```
### address pool full:
edit /etc/docker/daemon.json
```
{
"default-address-pools": [
{
"base" : "172.16.0.0/12",
"size" : 24
}
]
}
```
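Changes to daemon.json only take effect once the Docker daemon is restarted. A quick way to apply and verify the new pool (assuming a systemd host; the throwaway network name is arbitrary):
```
sudo systemctl restart docker
# a freshly created network should now receive a /24 out of 172.16.0.0/12
docker network create pool-check
docker network inspect pool-check --format '{{ (index .IPAM.Config 0).Subnet }}'
docker network rm pool-check
```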
## available containers:
#### Env-Vars
/etc/environment
```
SYSTEM_EMAIL_USER="some@thing.de"
SYSTEM_EMAIL_PASSSWORD="asdf"
SYSTEM_EMAIL_SMTP_HOST="mail.ovh.net"
SYSTEM_EMAIL_SMTP_PORT="465"
```
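Variables in /etc/environment are only picked up by new login sessions; to make them visible to a shell that then runs docker compose, they can be sourced explicitly (a minimal sketch):
```
# export everything defined in /etc/environment into the current shell
set -a
. /etc/environment
set +a
echo "$SYSTEM_EMAIL_USER"
```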
### SMTP Settings
SMTP_USER=${SYSTEM_EMAIL_USER}
SMTP_PASSWORD=${SYSTEM_EMAIL_PASSSWORD}
SMTP_HOST=${SYSTEM_EMAIL_SMTP_HOST}
SMTP_PORT=${SYSTEM_EMAIL_SMTP_PORT}
SMTP_FROM_ADDRESS=${SYSTEM_EMAIL_USER}
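Compose files that reference these variables via ${...} get the values substituted from the environment when the stack is started; rendering the resolved file shows what will actually be used:
```
# run inside a service directory; prints the compose file with variables resolved
docker compose config | grep -i smtp
```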
### OAuth2 Integration
* use https://auth.domr.ovh
* [git](https://git.home.domroese.eu)
* git server
* [guac](https://guac.home.domroese.eu)
* Guacamole, SSH and remote desktop sessions
* [rss](https://rss.home.domroese.eu)
* RSS reader
* [morphos](https://morphos.home.domroese.eu)
* Image and Video converter
* [uptimekuma](https://uptimekuma.home.domroese.eu)
* monitoring
* [kopia](https://kopia.home.domroese.eu)
* backup tools
* [jenkins](https://jenkins.home.domroese.eu)
* [pihole](https://pihole.home.domroese.eu)
* [paperless](https://paperless.home.domroese.eu)
* DMS
* [ittools](https://ittools.home.domroese.eu)
* just tools
* [omnitools](https://omnitools.home.domroese.eu)
* also just tools
* [vault](https://vault.home.domroese.eu)
* key vault
* [chat](https://chat.home.domroese.eu)
* LLMs
* [budibase](https://budibase.home.domroese.eu)
* No-Code coding
* [erugo](https://erugo.home.domroese.eu)
* Filesharing
* [excalidraw](https://excalidraw.home.domroese.eu)
* virtual whiteboard / diagrams
* [homarr](https://homarr.home.domroese.eu)
* monitoring
* [homepage](https://homepage.home.domroese.eu)
* Homepage
* [mealie](https://mealie.home.domroese.eu)
* Recipes
* [shiori](https://shiori.home.domroese.eu)
* Bookmarks
* [wallos](https://wallos.home.domroese.eu)
* finance, subscription tracking
* [nas](https://nas.home.domroese.eu)


@@ -0,0 +1,29 @@
services:
actual_server:
image: docker.io/actualbudget/actual-server:latest
ports:
# This line makes Actual available at port 5006 of the device you run the server on,
# i.e. http://localhost:5006. You can change the first number to change the port, if you want.
- '5006:5006'
environment:
# Uncomment any of the lines below to set configuration options.
# - ACTUAL_HTTPS_KEY=/data/selfhost.key
# - ACTUAL_HTTPS_CERT=/data/selfhost.crt
# - ACTUAL_PORT=5006
# - ACTUAL_UPLOAD_FILE_SYNC_SIZE_LIMIT_MB=20
# - ACTUAL_UPLOAD_SYNC_ENCRYPTED_FILE_SYNC_SIZE_LIMIT_MB=50
# - ACTUAL_UPLOAD_FILE_SIZE_LIMIT_MB=20
# See all options and more details at https://actualbudget.github.io/docs/Installing/Configuration
# !! If you are not using any of these options, remove the 'environment:' tag entirely.
volumes:
# Change './actual-data' below to the path to the folder you want Actual to store its data in on your server.
# '/data' is the path Actual will look for its files in by default, so leave that as-is.
- /home/soenke/docker-data/actualBudget/data:/data
healthcheck:
# Enable health check for the instance
test: ['CMD-SHELL', 'node src/scripts/health-check.js']
interval: 60s
timeout: 10s
retries: 3
start_period: 20s
restart: unless-stopped
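With the healthcheck above, the container reports healthy roughly 20 seconds after start (the configured start_period). Assuming Docker Compose v2, a quick way to watch it come up:
```
docker compose up -d
docker compose ps            # status column shows "(healthy)" once the probe passes
docker compose logs -f actual_server
```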

api.md

@@ -1 +0,0 @@
uk1_NptdpvVbC14Nt3qZ6EJHt9IrFGuSurmqSM6cSf8y


@@ -1,17 +0,0 @@
# SMTP Host Emails are sent to
AUTHENTIK_EMAIL__HOST=smtp.mail.ovh.net
AUTHENTIK_EMAIL__PORT=465
# Optionally authenticate (don't add quotation marks to your password)
AUTHENTIK_EMAIL__USERNAME=soenke@domr.ovh
AUTHENTIK_EMAIL__PASSWORD=5Qy6/Hmo&IMl
# Use StartTLS
AUTHENTIK_EMAIL__USE_TLS=false
# Use SSL
AUTHENTIK_EMAIL__USE_SSL=true
AUTHENTIK_EMAIL__TIMEOUT=20
# Email address authentik will send from, should have a correct @domain
AUTHENTIK_EMAIL__FROM=soenke@domr.ovh
COMPOSE_PORT_HTTP=8444
COMPOSE_PORT_HTTPS=9444
PG_PASS=ygANkw/2DERtCPtVx/sByUz8oHh/AA/MKGvixunGJQdzjuI6
AUTHENTIK_SECRET_KEY=zCojkJpDWAapikKGWR812mSDXwS1bznfTJ+PIyAbqZAiZaKxhyRZTOPq3djqdMtIsqdoPr+HnJNfn9QG


@@ -1,5 +0,0 @@
auth.domr.ovh,
auth.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8444
}


@@ -1,6 +0,0 @@
* After install run
```
echo "PG_PASS=$(openssl rand -base64 36 | tr -d '\n')" >> .env
echo "AUTHENTIK_SECRET_KEY=$(openssl rand -base64 60 | tr -d '\n')" >> .env
# echo "AUTHENTIK_ERROR_REPORTING__ENABLED=true" >> .env
```
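A sanity check before the first `docker compose up -d` helps catch a truncated .env, since both variables are required by the compose file (a hedged sketch, not from the original readme):
```
grep -E '^(PG_PASS|AUTHENTIK_SECRET_KEY)=' .env
docker compose up -d
```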


@@ -1,85 +0,0 @@
services:
postgresql:
env_file:
- .env
environment:
POSTGRES_DB: ${PG_DB:-authentik}
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
healthcheck:
interval: 30s
retries: 5
start_period: 20s
test:
- CMD-SHELL
- pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}
timeout: 5s
image: docker.io/library/postgres:16-alpine
restart: unless-stopped
volumes:
- /home/soenke/docker-data/authentik/database:/var/lib/postgresql/data
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.authentik.http.name: 'Authentik'
kuma.authentik.http.url: 'https://auth.domr.ovh'
kuma.authentik.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'
homepage.group: Hosting
homepage.name: Authentik
homepage.icon: authentik.png
homepage.href: https://auth.domr.ovh/
homepage.description: Authentik Oauth2 Service
homepage.widget.type: authentik
homepage.widget.url: https://auth.domr.ovh/
homepage.widget.key: slGO2rsG4xTObyuzRYPEe4Gs92X8TeNblIYOstX0rCID1WEv6wT5wkz4filJ
server:
command: server
depends_on:
postgresql:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.12.3}
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
restart: unless-stopped
volumes:
- /home/soenke/docker-data/authentik/data:/data
- /home/soenke/docker-data/authentik/media:/media
- /home/soenke/docker-data/authentik/custom-templates:/templates
worker:
command: worker
depends_on:
postgresql:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.12.3}
restart: unless-stopped
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /home/soenke/docker-data/authentik/media:/media
- /home/soenke/docker-data/authentik/certs:/certs
- /home/soenke/docker-data/authentik/custom-templates:/templates
volumes:
database:
driver: local


@@ -1,65 +0,0 @@
services:
postgresql:
env_file:
- .env
environment:
POSTGRES_DB: ${PG_DB:-authentik}
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
healthcheck:
interval: 30s
retries: 5
start_period: 20s
test:
- CMD-SHELL
- pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}
timeout: 5s
image: docker.io/library/postgres:16-alpine
restart: unless-stopped
volumes:
- database:/var/lib/postgresql/data
server:
command: server
depends_on:
postgresql:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.12.3}
ports:
- ${COMPOSE_PORT_HTTP:-9000}:9000
- ${COMPOSE_PORT_HTTPS:-9443}:9443
restart: unless-stopped
volumes:
- ./data:/data
- ./custom-templates:/templates
worker:
command: worker
depends_on:
postgresql:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.12.3}
restart: unless-stopped
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/data
- ./certs:/certs
- ./custom-templates:/templates
volumes:
database:
driver: local


@@ -1,29 +0,0 @@
services:
autokuma:
image: ghcr.io/bigboot/autokuma:latest
restart: unless-stopped
environment:
AUTOKUMA__KUMA__URL: https://uptimekuma.domr.ovh/
AUTOKUMA__KUMA__USERNAME: "Soenke"
AUTOKUMA__KUMA__PASSWORD: "RvG7ULSTLf7cN39XCEnH4BVEjx4BuQgJ"
# AUTOKUMA__KUMA__MFA_TOKEN: <token>
# AUTOKUMA__KUMA__HEADERS: "<header1_key>=<header1_value>,<header2_key>=<header2_value>,..."
AUTOKUMA__KUMA__CALL_TIMEOUT: 5
AUTOKUMA__KUMA__CONNECT_TIMEOUT: 5
AUTOKUMA__TAG_NAME: AutoKuma
AUTOKUMA__TAG_COLOR: "#42C0FB"
AUTOKUMA__DEFAULT_SETTINGS: |-
docker.docker_container: {{container_name}}
http.max_redirects: 10
*.max_retries: 3
# AUTOKUMA__SNIPPETS__WEB: |-
# {{container_name}}_http.http.name: {{container_name}} HTTP
# {{container_name}}_http.http.url: https://{{@0}}:{{@1}}
# {{container_name}}_docker.docker.name: {{container_name}} Docker
# {{container_name}}_docker.docker.docker_container: {{container_name}}
AUTOKUMA__DOCKER__HOSTS: unix:///var/run/docker.sock
# AUTOKUMA__DOCKER__LABEL_PREFIX: kuma
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /home/soenke/docker-data/autokuma/data:/data


@@ -1,16 +0,0 @@
services:
beszel-agent:
image: henrygd/beszel-agent
container_name: beszel-agent
restart: unless-stopped
network_mode: host
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /home/soenke/docker-data/beszel-agent/beszel_agent_data:/var/lib/beszel-agent
# monitor other disks / partitions by mounting a folder in /extra-filesystems
# - /mnt/disk/.beszel:/extra-filesystems/sda1:ro
environment:
LISTEN: 45876
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEVA2+hStjbFgCmiZl80+JFDZZxePZ4fRV8hEwLj3/o5'
TOKEN: b608d327-43be-4a2c-a4fb-7e6606639fab
HUB_URL: https://beszel.domr.ovh


@@ -1,5 +0,0 @@
beszel.domr.ovh,
beszel.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7090
}


@@ -1,9 +0,0 @@
services:
beszel:
image: henrygd/beszel
container_name: beszel
restart: unless-stopped
ports:
- 7090:8090
volumes:
- /home/soenke/docker-data/beszel/beszel_data:/beszel_data

budibase/.env Normal file

@@ -0,0 +1,31 @@
# Use the main port in the builder for your self hosting URL, e.g. localhost:10000
MAIN_PORT=10000
# This section contains all secrets pertaining to the system
# These should be updated
API_ENCRYPTION_KEY="r*6V&XX#Nc@KhkB7"
JWT_SECRET="4hXZmFIvjmb$!jIp"
MINIO_ACCESS_KEY="*PG2M5^6WByh!uT1"
MINIO_SECRET_KEY="I238Lt@TqB#eTtG%"
COUCH_DB_PASSWORD="OgDcjjIA^Q6i0*VN"
COUCH_DB_USER=budibase
REDIS_PASSWORD="wfU2ufV*4#pxSyr&"
INTERNAL_API_KEY="pWCxHj8*A6bfEzTB"
# This section contains variables that do not need to be altered under normal circumstances
APP_PORT=4002
WORKER_PORT=4003
MINIO_PORT=4004
COUCH_DB_PORT=4005
COUCH_DB_SQS_PORT=4006
REDIS_PORT=6379
BUDIBASE_ENVIRONMENT=PRODUCTION
SQL_MAX_ROWS=
# An admin user can be automatically created initially if these are set
BB_ADMIN_USER_EMAIL=soenke@domroese.eu
BB_ADMIN_USER_PASSWORD="$$3ljjnr6#nGO3pP"
# A path that is watched for plugin bundles. Any bundles found are imported automatically/
PLUGINS_DIR=
ROLLING_LOG_MAX_SIZE=

budibase/docker-compose.yml Normal file

@@ -0,0 +1,120 @@
version: "3"
# optional ports are specified throughout for more advanced use cases.
services:
app-service:
restart: unless-stopped
image: budibase/apps
container_name: bbapps
environment:
SELF_HOSTED: 1
COUCH_DB_URL: http://${COUCH_DB_USER}:${COUCH_DB_PASSWORD}@couchdb-service:5984
WORKER_URL: http://worker-service:4003
MINIO_URL: http://minio-service:9000
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY}
INTERNAL_API_KEY: ${INTERNAL_API_KEY}
BUDIBASE_ENVIRONMENT: ${BUDIBASE_ENVIRONMENT}
PORT: 4002
API_ENCRYPTION_KEY: ${API_ENCRYPTION_KEY}
JWT_SECRET: ${JWT_SECRET}
LOG_LEVEL: info
ENABLE_ANALYTICS: "true"
REDIS_URL: redis-service:6379
REDIS_PASSWORD: ${REDIS_PASSWORD}
BB_ADMIN_USER_EMAIL: ${BB_ADMIN_USER_EMAIL}
BB_ADMIN_USER_PASSWORD: ${BB_ADMIN_USER_PASSWORD}
PLUGINS_DIR: ${PLUGINS_DIR}
OFFLINE_MODE: ${OFFLINE_MODE:-}
depends_on:
- worker-service
- redis-service
volumes:
- /home/soenke/docker-data/budibase/plugins:/plugins
worker-service:
restart: unless-stopped
image: budibase/worker
container_name: bbworker
environment:
SELF_HOSTED: 1
PORT: 4003
CLUSTER_PORT: ${MAIN_PORT}
API_ENCRYPTION_KEY: ${API_ENCRYPTION_KEY}
JWT_SECRET: ${JWT_SECRET}
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY}
MINIO_URL: http://minio-service:9000
APPS_URL: http://app-service:4002
COUCH_DB_USERNAME: ${COUCH_DB_USER}
COUCH_DB_PASSWORD: ${COUCH_DB_PASSWORD}
COUCH_DB_URL: http://${COUCH_DB_USER}:${COUCH_DB_PASSWORD}@couchdb-service:5984
INTERNAL_API_KEY: ${INTERNAL_API_KEY}
REDIS_URL: redis-service:6379
REDIS_PASSWORD: ${REDIS_PASSWORD}
OFFLINE_MODE: ${OFFLINE_MODE:-}
depends_on:
- redis-service
- minio-service
minio-service:
restart: unless-stopped
image: minio/minio
volumes:
- minio_data:/data
environment:
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY}
MINIO_BROWSER: "off"
command: server /data --console-address ":9001"
healthcheck:
test: "timeout 5s bash -c ':> /dev/tcp/127.0.0.1/9000' || exit 1"
interval: 30s
timeout: 20s
retries: 3
proxy-service:
restart: unless-stopped
ports:
- "${MAIN_PORT}:10000"
container_name: bbproxy
image: budibase/proxy
environment:
- PROXY_RATE_LIMIT_WEBHOOKS_PER_SECOND=10
- PROXY_RATE_LIMIT_API_PER_SECOND=20
- APPS_UPSTREAM_URL=http://app-service:4002
- WORKER_UPSTREAM_URL=http://worker-service:4003
- MINIO_UPSTREAM_URL=http://minio-service:9000
- COUCHDB_UPSTREAM_URL=http://couchdb-service:5984
- RESOLVER=127.0.0.11
depends_on:
- minio-service
- worker-service
- app-service
- couchdb-service
couchdb-service:
restart: unless-stopped
image: budibase/couchdb:v3.3.3-sqs-v2.1.1
environment:
- COUCHDB_PASSWORD=${COUCH_DB_PASSWORD}
- COUCHDB_USER=${COUCH_DB_USER}
- TARGETBUILD=docker-compose
volumes:
- couchdb3_data:/opt/couchdb/data
redis-service:
restart: unless-stopped
image: redis
command: redis-server --requirepass "${REDIS_PASSWORD}"
volumes:
- redis_data:/data
volumes:
couchdb3_data:
driver: local
minio_data:
driver: local
redis_data:
driver: local


@@ -1,158 +1,110 @@
auth.domr.ovh,
auth.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8444
}
beszel.domr.ovh,
beszel.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7090
}
caddy.domr.ovh,
caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8881
}
api.caddy.domr.ovh,
api.caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2019
}
chartbrew.domr.ovh,
chartbrew.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4018
}
chartbrew.domr.ovh:4019,
chartbrew.home.domroese.eu:4019 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4019
}
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}
dashy.domr.ovh, #donetick
dashy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8832
}
dockpeek.domr.ovh,
dockpeek.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3420
}
excalidraw.domr.ovh,
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}
rss.domr.ovh,
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8884
}
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
haus.domr.ovh,
haus.home.domroese.eu {
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
reverse_proxy 192.168.1.65:8884
}
homebox.domr.ovh,
homebox.home.domroese.eu:443 {
morphos.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3100
reverse_proxy 192.168.1.65:8020
}
homepage.domr.ovh:80,
homepage.domr.ovh:443,
homepage.home.domroese.eu:443 {
uptimekuma.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
reverse_proxy 192.168.1.65:8030
}
huly.domr.ovh,
huly.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
}
ittools.domr.ovh:443,
ittools.home.domroese.eu:443,
ittools.domr.ovh:80,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}
journal.domr.ovh,
journiv.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8198
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
jenkins.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8040
}
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}
paperless.home.domroese.eu:443,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}
ittools.home.domroese.eu:443,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}
vault.home.domroese.eu:443,
vault.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4080
}
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}
budibase.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4002
}
erugo.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9997
}
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}
homarr.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7575
}
homepage.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9925
}
memos.domr.ovh,
memos.home.domroese.eu:443 {
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5230
reverse_proxy 192.168.1.65:8579
}
shiori.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2661
}
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8282
}
nas.domr.ovh,
nas.home.domroese.eu {
tls soenke@domroese.eu {
client_auth {
@@ -165,100 +117,4 @@ nas.home.domroese.eu {
}
}
}
ntfy.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8234
}
chat.domr.ovh,
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}
omnitools.domr.ovh,
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8579
}
paperless.domr.ovh:443,
paperless.home.domroese.eu:443,
paperless.domr.ovh:80,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}
pihole.domr.ovh,
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
portracker.domr.ovh,
portracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4999
}
speedtesttracker.domr.ovh,
speedtesttracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1378
}
pdf.domr.ovh,
pdf.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3614
}
uptimekuma.domr.ovh,
uptimekuma.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8030
}
vault.domr.ovh:443,
vault.home.domroese.eu:443,
vault.domr.ovh:80,
vault.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8282
}
yopass.domr.ovh,
yopass.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8667
}


@@ -1,264 +0,0 @@
auth.domr.ovh,
auth.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8444
}
beszel.domr.ovh,
beszel.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7090
}
caddy.domr.ovh,
caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8881
}
api.caddy.domr.ovh,
api.caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2019
}
chartbrew.domr.ovh,
chartbrew.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4018
}
chartbrew.domr.ovh:4019,
chartbrew.home.domroese.eu:4019 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4019
}
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}
dashy.domr.ovh, #donetick
dashy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8832
}
dockpeek.domr.ovh,
dockpeek.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3420
}
excalidraw.domr.ovh,
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}
rss.domr.ovh,
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8884
}
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
haus.domr.ovh,
haus.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
}
homebox.domr.ovh,
homebox.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3100
}
homepage.domr.ovh:80,
homepage.domr.ovh:443,
homepage.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
}
huly.domr.ovh,
huly.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
}
ittools.domr.ovh:443,
ittools.home.domroese.eu:443,
ittools.domr.ovh:80,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}
journal.domr.ovh,
journiv.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8198
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9925
}
memos.domr.ovh,
memos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5230
}
nas.domr.ovh,
nas.home.domroese.eu {
tls soenke@domroese.eu {
client_auth {
mode request
}
}
reverse_proxy https://192.168.1.194:5001 {
transport http {
tls_insecure_skip_verify # Disable TLS Verification, as we don't have a real certificate on the nas
}
}
}
ntfy.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8234
}
chat.domr.ovh,
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}
omnitools.domr.ovh,
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8579
}
paperless.domr.ovh:443,
paperless.home.domroese.eu:443,
paperless.domr.ovh:80,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}
pihole.domr.ovh,
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
portracker.domr.ovh,
portracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4999
}
speedtesttracker.domr.ovh,
speedtesttracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1378
}
pdf.domr.ovh,
pdf.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3614
}
uptimekuma.domr.ovh,
uptimekuma.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8030
}
vault.domr.ovh:443,
vault.home.domroese.eu:443,
vault.domr.ovh:80,
vault.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8282
}
yopass.domr.ovh,
yopass.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8667
}


@@ -1,234 +0,0 @@
auth.domr.ovh,
auth.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8444
}
bookstack.domr.ovh,
bookstack.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6875
}
bracket.domr.ovh:443,
bracket.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3000
}
backend.bracket.domr.ovh:443,
backend.bracket.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8400
}
caddy.domr.ovh,
caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8881
}
api.caddy.domr.ovh,
api.caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2019
}
changedetect.domr.ovh:80,
changedetect.home.domroese.eu:80,
changedetect.domr.ovh:443,
changedetect.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5238
}
chartbrew.domr.ovh,
chartbrew.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4018
}
chartbrew.domr.ovh:4019,
chartbrew.home.domroese.eu:4019 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4019
}
onboarding.domr.ovh,
onboarding.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8517
}
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}
todos.domr.ovh, #donetick
todos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2021
}
erugo.domr.ovh,
erugo.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9997
}
excalidraw.domr.ovh,
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}
firefly.domr.ovh,
firefly.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8950
}
rss.domr.ovh,
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8884
}
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}
homarr.domr.ovh,
homarr.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:7575
}
homepage.domr.ovh:80,
homepage.domr.ovh:443,
homepage.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
}
ittools.domr.ovh:443,
ittools.home.domroese.eu:443,
ittools.domr.ovh:80,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}
jenkins.domr.ovh,
jenkins.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8040
}
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9925
}
memos.domr.ovh,
memos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5230
}
ntfy.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8234
}
chat.domr.ovh,
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}
omnitools.domr.ovh,
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8579
}
paperless.domr.ovh:443,
paperless.home.domroese.eu:443,
paperless.domr.ovh:80,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}
pihole.domr.ovh,
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}
plantit.domr.ovh,
plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3632
}
api.plantit.domr.ovh,
api.plantit.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8632
}
shiori.domr.ovh,
shiori.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2661
}
speedtesttracker.domr.ovh,
speedtesttracker.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1378
}
pdf.domr.ovh,
pdf.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3614
}
uptimekuma.domr.ovh,
uptimekuma.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8030
}
vault.domr.ovh:443,
vault.home.domroese.eu:443,
vault.domr.ovh:80,
vault.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4080
}
wallos.domr.ovh,
wallos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8282
}


@@ -1,11 +0,0 @@
caddy.domr.ovh,
caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8881
}
api.caddy.domr.ovh,
api.caddy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2019
}


@@ -15,8 +15,6 @@ services:
restart: unless-stopped
networks:
- caddy-network
environment:
- CADDY_ADMIN=0.0.0.0:2019
caddy-ui:
image: qmcgaw/caddy-ui
@@ -26,17 +24,6 @@ services:
- CADDY_API_ENDPOINT=http://192.168.1.65:2019
networks:
- caddy-network
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.caddy.http.name: 'Caddy'
kuma.caddy.http.url: 'https://caddy.domr.ovh'
kuma.caddy.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'
networks:
caddy-network:


@@ -1,10 +0,0 @@
chartbrew.domr.ovh,
chartbrew.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4018
}
chartbrew.domr.ovh:4019,
chartbrew.home.domroese.eu:4019 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:4019
}


@@ -1,40 +0,0 @@
services:
chartbrew:
container_name: chartbrew
image: razvanilin/chartbrew
environment:
- VITE_APP_API_HOST=https://chartbrew.domr.ovh:4019
- VITE_APP_CLIENT_PORT=4018
- VITE_APP_CLIENT_HOST=https://chartbrew.domr.ovh
- CB_REDIS_PASSWORD=Diavid9600
- CB_REDIS_PORT=6379
- CB_REDIS_HOST=host.docker.internal
- CB_DB_PASSWORD=Diavid9600
- CB_DB_USERNAME=root
- CB_DB_NAME=chartbrew
- CB_DB_PORT=3306
- CB_DB_HOST=host.docker.internal
- CB_API_PORT=4019
- CB_API_HOST=0.0.0.0
- CB_ENCRYPTION_KEY=iuGSZWEs2+SjkrW15a468gIG8089pEUDfZ4XVZD0772TQCTj/kac1Oz7noOge+WRcdj6W8Q0JfqfVXBUPXHuPzAm2fBBRC9xjCdVqbAYk/0=
- CB_MAIL_HOST=${SYSTEM_EMAIL_USER}
- CB_MAIL_USER=${SYSTEM_EMAIL_PASSSWORD}
- CB_MAIL_PASS=${SYSTEM_EMAIL_SMTP_HOST}
- CB_MAIL_PORT=${SYSTEM_EMAIL_SMTP_PORT}
- CB_MAIL_SECURE=${SYSTEM_EMAIL_SMTP_SECURITY}
- CB_ADMIN_MAIL=soenke@domroese.eu
- CB_OPENAI_API_KEY=
- CB_OPENAI_MODEL=
ports:
- '4018:4018'
- '4019:4019'
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.chartbrew.http.name: 'Chartbrew'
kuma.chartbrew.http.url: 'https://chartbrew.domr.ovh'
kuma.chartbrew.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'


@@ -1,5 +0,0 @@
convertx.domr.ovh,
convertx.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3410
}


@@ -1,21 +0,0 @@
services:
convertx:
image: ghcr.io/c4illin/convertx
container_name: convertx
restart: unless-stopped
ports:
- "3410:3000"
environment:
- JWT_SECRET=a1fd043661609d72a0447a1c2c1b1fc3ac6e1e610ef76af82c4239f59a512ae8f0b5e1d080011567a4b47bc27eeaa79e9653a8caa574957a575c91609ed881cfd96dd7dbed388d0dbada10787b00876d40415efd2f01d131b6de0b4f5e67ea55bf35d69b778aedde31c7f245972a352b713984ee63733d22ca9399940af70c3546b37d3afaa24158547238064b42a4aae9e283c3087a9742b6bda3401c2710bd138c4d90718726b7927c4f13cfbea2b55b85149360dc435257c4d16a31a7e5881806037d2f06c40e7bc5c5a1904a2c8e6c7e35998228fdf6be73b52c76aad82fb0f906d225503adda7e2aed65212b0cdca25c19182cb21957677c36a6c53cd46
volumes:
- /home/soenke/docker-data/convertx/data:/app/data
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.convertx.http.name: 'convertx'
kuma.convertx.http.url: 'https://convertx.domr.ovh'
kuma.convertx.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'


@@ -1 +0,0 @@
ADMIN_PW_DAILYTXT="Diavid9600"


@@ -1,5 +0,0 @@
dailytxt.domr.ovh,
dailytxt.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8317
}


@@ -1,36 +0,0 @@
services:
dailytxt:
image: phitux/dailytxt:latest
container_name: dailytxt
restart: always
env_file:
- .env
environment:
# That's the internal container port. You can actually use any port number (must match the one at 'ports')
- PORT=8317
- SECRET_KEY="O+EuLJXNAIxIT7puvNU5KVC4sh3JPRorTMCDRAkekho="
# Set it to False or remove the line completely to disallow registration of new users.
- ALLOW_REGISTRATION=True
# Use this if you want the json log file to be indented. Makes it easier to compare the files. Otherwise just remove this line!
- DATA_INDENT=2
# Set after how many days the JWT token will expire and you have to re-login. Defaults to 30 days if the line is omitted.
- JWT_EXP_DAYS=60
# Enable/disable a feature of DailyTxT to auto-check at most once per hour whether a newer version of DailyTxT is available. Defaults to True if the line is omitted.
- ENABLE_UPDATE_CHECK=True
- ADMIN_PASSWORD=${ADMIN_PW_DAILYTXT}
ports:
- "8317:8765"
# perhaps you only want:
# "<host_port>:8765"
volumes:
- "/home/soenke/docker-data/dailytxt/:/app/data/"
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.work.tag.name: 'Work'
kuma.work.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.dailytxt.http.name: 'DailyTxT'
kuma.dailytxt.http.url: 'https://dailytxt.domr.ovh'
kuma.dailytxt.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'


@@ -1,5 +0,0 @@
dashy.domr.ovh, #donetick
dashy.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8832
}


@@ -1,10 +0,0 @@
services:
dashy:
image: 'lissy93/dashy:latest'
restart: always
container_name: dashy
volumes:
- '/home/soenke/docker-data/dashy/:/app/user-data/'
ports:
- '8832:8080'


@@ -1,5 +0,0 @@
dockpeek.domr.ovh,
dockpeek.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3420
}


@@ -1,13 +0,0 @@
services:
dockpeek:
image: ghcr.io/dockpeek/dockpeek:latest
container_name: dockpeek
environment:
- SECRET_KEY=saljfbhwkhsjgbwjlefn # Set secret key
- USERNAME=soenke # Change default username
- PASSWORD=Diavid9600 # Change default password
ports:
- "3420:8000"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped

erugo/docker-compose.yml Normal file

@@ -0,0 +1,22 @@
services:
erugo:
image: wardy784/erugo:latest
user: 0:0
container_name: Erugo
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 80 || exit 1"]
interval: 10s
timeout: 5s
retries: 3
start_period: 90s
environment:
VITE_API_URL: https://erugo.yourname.synology.me
JWT_SECRET: dLB%7V$YJ5cPPmeuZCc%0O2E0HMV9Ock!J0dU@mzgYp4IaCR4XVuUn%0i!e@sMUq
APP_KEY: h$@H$BdK8ywbKmwkt^B8TH^mjDQ$w*AideHPhOLTHt$qH2eQvqSWJpxsARKVRxXM
APP_DEBUG: true
APP_TIMEZONE: Europe/Berlin
volumes:
- /home/soenke/docker-data/erugo/data:/var/www/html/storage:rw
ports:
- 9997:80
restart: on-failure:5


@@ -1,5 +0,0 @@
excalidraw.domr.ovh,
excalidraw.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8267
}


@@ -1,18 +1,23 @@
services:
excalidraw:
build:
context: .
args:
- NODE_ENV=development
container_name: excalidraw
image: excalidraw/excalidraw:latest
ports:
- "8267:80"
restart: on-failure
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.excalidraw.http.name: 'excalidraw'
kuma.excalidraw.http.url: 'https://excalidraw.domr.ovh'
kuma.excalidraw.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
stdin_open: true
healthcheck:
disable: true
environment:
- NODE_ENV=development
volumes:
- /home/soenke/docker-data/excalidraw/data:/opt/node_app/app:delegated
- /home/soenke/docker-data/excalidraw/package.json:/opt/node_app/package.json
- /home/soenke/docker-data/excalidraw/yarn.lock:/opt/node_app/yarn.lock
- notused:/opt/node_app/app/node_modules
volumes:
notused:


@@ -1,5 +0,0 @@
rss.domr.ovh,
rss.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8884
}


@@ -1,30 +0,0 @@
services:
freshrss:
image: lscr.io/linuxserver/freshrss:latest
container_name: freshrss
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- OIDC_ENABLED=1
- OIDC_PROVIDER_METADATA_URL=https://auth.domr.ovh/application/o/freshrss/.well-known/openid-configuration
- OIDC_CLIENT_ID=9O7GtmlyNAxaTwsO5Abg9BWCaCpHyzR551VC94qO
- OIDC_CLIENT_SECRET=ZPTML006HvR0yoRonHIZdvSMIcHYjdkRcC8QT6DERZYmitTIV5cCZhEESQNaKW4vEI7i7z1tC4brbEMaC9ERsfxlOlm6ZTVVVc8kcfYIthUGLijhi2livaJzwSYjFaWZ
- OIDC_X_FORWARDED_HEADERS=X-Forwarded-Port X-Forwarded-Proto X-Forwarded-Host
- OIDC_SCOPES=openid email profile
volumes:
- /home/soenke/docker-data/freshrss:/config
ports:
- 8884:80
restart: unless-stopped
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.freshrss.http.name: 'freshrss'
kuma.freshrss.http.url: 'https://rss.domr.ovh'
kuma.freshrss.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'


@@ -1,5 +0,0 @@
for dir in *; do
if [ -d "$dir" ]; then
( cd "$dir" && touch Caddyfilepart )
fi
done
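The deleted helper above only seeds an empty Caddyfilepart in every service directory. Presumably the per-service parts are later merged into the central Caddyfile; a hedged sketch of that assembly step (the output path is an assumption, not taken from the repo):
```
# concatenate all per-service parts into one Caddyfile (output path assumed)
cat */Caddyfilepart > caddy/Caddyfile
```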


@@ -1,5 +0,0 @@
git.domr.ovh,
git.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:8418
}


@@ -1,5 +0,0 @@
guac.domr.ovh,
guac.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:6080
}


@@ -84,6 +84,10 @@
# 0.61 2024-07-27 fix networks + version 3.0
# 0.62 2024-07-27 fix
#####################################################################################
#the attribute `version` is obsolete, it will be ignored, please remove it to avoid potential confusion
#version: '3.0'
# networks
# create a network 'guacnetwork_compose' in mode 'bridged'
networks:
@@ -141,14 +145,4 @@ services:
## enable next line when using nginx
## - 8080/tcp
restart: always
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.guacamole.http.name: 'guacamole'
kuma.guacamole.http.url: 'https://guac.domr.ovh/guacamole'
kuma.guacamole.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'


@@ -1,5 +0,0 @@
haus.domr.ovh,
haus.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8472
}


@@ -1,9 +0,0 @@
services:
timesy:
image: ghcr.io/awwwsm/haus
logging:
options:
max-size: 1g
restart: always
ports:
- '8472:8080'

homarr/docker-compose.yml Normal file

@@ -0,0 +1,12 @@
services:
homarr:
container_name: homarr
image: ghcr.io/homarr-labs/homarr:latest
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock # Optional, only if you want docker integration
- /home/soenke/docker-data/homarr/appdata:/appdata
environment:
- SECRET_ENCRYPTION_KEY=c99349e72b4267a0ba7a19fa2de53cfdbd73708974338d2abe36f1379fe8ba7c
ports:
- '7575:7575'


@@ -1,5 +0,0 @@
homebox.domr.ovh,
homebox.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3100
}


@@ -1,24 +0,0 @@
services:
homebox:
image: ghcr.io/hay-kot/homebox:latest
# image: ghcr.io/hay-kot/homebox:latest-rootless
container_name: homebox
restart: always
environment:
- HBOX_LOG_LEVEL=info
- HBOX_LOG_FORMAT=text
- HBOX_WEB_MAX_UPLOAD_SIZE=10
volumes:
- /home/soenke/docker-data/homebox/data:/data/
ports:
- 3100:7745
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.homarr.http.name: 'HomeBox'
kuma.homarr.http.url: 'https://homebox.domr.ovh/'
kuma.homarr.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'


@@ -1,6 +0,0 @@
homepage.domr.ovh:80,
homepage.domr.ovh:443,
homepage.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:3891
}


@@ -1,18 +1,4 @@
services:
dockerproxy:
image: ghcr.io/tecnativa/docker-socket-proxy:latest
container_name: dockerproxy
environment:
- CONTAINERS=1 # Allow access to viewing containers
- SERVICES=1 # Allow access to viewing services (necessary when using Docker Swarm)
- TASKS=1 # Allow access to viewing tasks (necessary when using Docker Swarm)
- POST=0 # Disallow any POST operations (effectively read-only)
ports:
- 127.0.0.1:2375:2375
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro # Mounted as read-only
restart: unless-stopped
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
@@ -22,15 +8,4 @@ services:
- /home/soenke/docker-data/homepage/config:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
environment:
- HOMEPAGE_ALLOWED_HOSTS=gethomepage.dev,homepage.domr.ovh,homepage.home.domroese.eu,homepage.domr.ovh:80,homepage.domr.ovh:443 # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.homepage.http.name: 'homepage'
kuma.homepage.http.url: 'https://homepage.domr.ovh/'
kuma.homepage.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
HOMEPAGE_ALLOWED_HOSTS: gethomepage.dev,homepage.home.domroese.eu # required, may need port. See gethomepage.dev/installation/#homepage_allowed_hosts


@@ -1,2 +0,0 @@
SERVER_ADDRESS=https://huly.domr.ovh
HULY_VERSION=7


@@ -1,5 +0,0 @@
huly.domr.ovh,
huly.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8087
}


@@ -1,157 +0,0 @@
version: "3"
services:
mongodb:
image: "mongo:7-jammy"
container_name: mongodb
environment:
- PUID=1000
- PGID=1000
volumes:
- /home/soenke/docker-data/huly/db:/data/db
ports:
- 27017:27017
restart: unless-stopped
minio:
image: "minio/minio"
command: server /data --address ":9000" --console-address ":9001"
ports:
- 9000:9000
- 9001:9001
volumes:
- /home/soenke/docker-data/huly/files:/data
restart: unless-stopped
elastic:
image: "elasticsearch:7.14.2"
command: |
/bin/sh -c "./bin/elasticsearch-plugin list | grep -q ingest-attachment || yes | ./bin/elasticsearch-plugin install --silent ingest-attachment;
/usr/local/bin/docker-entrypoint.sh eswrapper"
volumes:
- /home/soenke/docker-data/huly/elastic:/usr/share/elasticsearch/data
ports:
- 9200:9200
environment:
- ELASTICSEARCH_PORT_NUMBER=9200
- BITNAMI_DEBUG=true
- discovery.type=single-node
- ES_JAVA_OPTS=-Xms1024m -Xmx1024m
- http.cors.enabled=true
- http.cors.allow-origin=http://localhost:8082
healthcheck:
interval: 20s
retries: 10
test: curl -s http://localhost:9200/_cluster/health | grep -vq '"status":"red"'
restart: unless-stopped
account:
image: hardcoreeng/account:${HULY_VERSION}
links:
- mongodb
- minio
ports:
- 3000:3000
environment:
- SERVER_PORT=3000
- SERVER_SECRET=secret
- MONGO_URL=mongodb://mongodb:27017
- TRANSACTOR_URL=ws://transactor:3333;ws://${SERVER_ADDRESS}:3333
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- FRONT_URL=http://front:8080
- INIT_WORKSPACE=demo-tracker
- MODEL_ENABLED=*
- ACCOUNTS_URL=http://${SERVER_ADDRESS}:3000
- ACCOUNT_PORT=3000
restart: unless-stopped
front:
image: hardcoreeng/front:${HULY_VERSION}
links:
- mongodb
- minio
- elastic
- collaborator
- transactor
ports:
- 8087:8080
environment:
- SERVER_PORT=8080
- SERVER_SECRET=secret
- ACCOUNTS_URL=http://${SERVER_ADDRESS}:3000
- REKONI_URL=http://${SERVER_ADDRESS}:4004
- CALENDAR_URL=http://${SERVER_ADDRESS}:8095
- GMAIL_URL=http://${SERVER_ADDRESS}:8088
- TELEGRAM_URL=http://${SERVER_ADDRESS}:8086
- UPLOAD_URL=/files
- ELASTIC_URL=http://elastic:9200
- COLLABORATOR_URL=ws://${SERVER_ADDRESS}:3078
- COLLABORATOR_API_URL=http://${SERVER_ADDRESS}:3078
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- MONGO_URL=mongodb://mongodb:27017
- TITLE=Huly Self Hosted
- DEFAULT_LANGUAGE=en
- LAST_NAME_FIRST=true
restart: unless-stopped
collaborator:
image: hardcoreeng/collaborator:${HULY_VERSION}
links:
- mongodb
- minio
- transactor
ports:
- 3078:3078
environment:
- COLLABORATOR_PORT=3078
- SECRET=secret
- ACCOUNTS_URL=http://account:3000
- UPLOAD_URL=/files
- MONGO_URL=mongodb://mongodb:27017
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
restart: unless-stopped
transactor:
image: hardcoreeng/transactor:${HULY_VERSION}
links:
- mongodb
- elastic
- minio
- rekoni
- account
ports:
- 3333:3333
environment:
- SERVER_PORT=3333
- SERVER_SECRET=secret
- SERVER_CURSOR_MAXTIMEMS=30000
- ELASTIC_URL=http://elastic:9200
- ELASTIC_INDEX_NAME=huly_storage_index
- MONGO_URL=mongodb://mongodb:27017
- METRICS_CONSOLE=false
- METRICS_FILE=metrics.txt
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- REKONI_URL=http://rekoni:4004
- FRONT_URL=http://${SERVER_ADDRESS}:8087
- SERVER_PROVIDER=ws
- ACCOUNTS_URL=http://account:3000
- LAST_NAME_FIRST=true
- UPLOAD_URL=http://${SERVER_ADDRESS}/files
restart: unless-stopped
rekoni:
image: hardcoreeng/rekoni-service:${HULY_VERSION}
ports:
- 4004:4004
environment:
- SECRET=secret
deploy:
resources:
limits:
memory: 500M
restart: unless-stopped
volumes:
db:
files:
elastic:
etcd:


@@ -1,5 +0,0 @@
immich.domr.ovh,
immich.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.194:2283
}


@@ -1,7 +0,0 @@
ittools.domr.ovh:443,
ittools.home.domroese.eu:443,
ittools.domr.ovh:80,
ittools.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9080
}


@@ -8,13 +8,3 @@ services:
- UID=1000
- GID=1000
image: 'corentinth/it-tools:latest'
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.ittools.http.name: 'ittools'
kuma.ittools.http.url: 'https://ittools.domr.ovh/'
kuma.ittools.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'

jenkins/.env Normal file

@@ -0,0 +1,2 @@
JENKINS_HOME_PATH=/home/soenke/docker-data/jenkins/jenkins_sandbox_home
JENKINS_AGENT_SSH_PUBLIC_KEY="<< leave empty for now >>"

jenkins/README.md Normal file

@@ -0,0 +1,65 @@
# Jenkins with Docker Compose
Jenkins Docker Compose file (and instructions) to configure your Jenkins controller and agent.
## Configuring Jenkins
1. Create the **jenkins_home** folder in your local environment
```
mkdir jenkins_sandbox_home
```
2. Create a file named **.env** and add the following:
```yml
JENKINS_HOME_PATH=/home/user/jenkins_sandbox_home # your local jenkins_home path.
JENKINS_AGENT_SSH_PUBLIC_KEY=<< leave empty for now >>
```
3. Run Jenkins controller:
```bash
docker-compose up -d
```
4. Get the password to proceed with the installation:
```bash
docker logs jenkins_sandbox | less
```
5. Go to <http://localhost:8080/> and enter the password.
6. Select **Install Suggested Plugins**, create the **admin** user and password, and leave the Jenkins URL <http://localhost:8080/>.
## Configuring Jenkins Agent
1. Use ssh-keygen to create a new key pair:
```bash
ssh-keygen -t rsa -f jenkins_key
```
2. Go to Jenkins and click **Manage jenkins** > **Manage credentials**.
3. Under **Stores scoped to Jenkins**, click **Global credentials**, next click **Add credentials** and set the following options:
- Select **SSH Username with private key**.
- Limit the scope to **System**.
- Give the credential an **ID**.
- Provide a **description**.
- Enter a **username**.
- Under Private Key check **Enter directly**.
- Paste the content of the private key into the text box.
4. Click **Ok** to save.
5. Paste the public key on the **JENKINS_AGENT_SSH_PUBLIC_KEY** variable, in the **.env** file.
6. Recreate the services:
```bash
docker-compose down
docker-compose up -d
```
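Once both containers are back up, a quick check that the agent accepted the injected public key (container names are the ones set in the compose file):
```bash
docker-compose ps
docker logs jenkins_sandbox_agent 2>&1 | head -n 20
```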


@@ -0,0 +1,23 @@
# Jenkins Sandbox
version: "3"
services:
jenkins:
image: jenkins/jenkins:lts
container_name: jenkins_sandbox
privileged: true
user: root
ports:
- 8040:8080
- 50000:50000
volumes:
- ${JENKINS_HOME_PATH}:/var/jenkins_home
- /var/run/docker.sock:/var/run/docker.sock
agent:
image: jenkins/ssh-agent:jdk11
container_name: jenkins_sandbox_agent
privileged: true
user: root
expose:
- 22
environment:
- JENKINS_AGENT_SSH_PUBKEY=${JENKINS_AGENT_SSH_PUBLIC_KEY}


@@ -1,297 +0,0 @@
# ============================================================================
# Journiv Environment Configuration Template
# Copy this file to .env and customize for your deployment.
#
# Usage:
# cp env.template .env
# nano .env
# Required settings:
# - SECRET_KEY
# - DOMAIN_NAME
#
# If using PostgreSQL (DB_DRIVER=postgres), also set:
# - Either POSTGRES_PASSWORD (with optional components), OR
# - DATABASE_URL with a PostgreSQL URL (postgresql:// or postgres://)
# NOT BOTH - specifying both will cause startup to fail
#
# All other settings are optional.
# ============================================================================
# ============================================================================
# REQUIRED SETTINGS
# ============================================================================
# Secret key for JWT token signing and encryption
# Generate with:
# python -c "import secrets; print(secrets.token_urlsafe(32))"
# Or:
# openssl rand -base64 32
SECRET_KEY=gjQ/6282Fdf3p4hK61nx/OHmLJCluIQauz/mm5idPls
# ============================================================================
# DOMAIN CONFIGURATION
# ============================================================================
# Public domain name where Journiv is accessible.
# DO NOT include http:// or https://.
# DO NOT include trailing slashes.
#
# Examples:
DOMAIN_NAME=journiv.domr.ovh
DOMAIN_NAME=journal.domr.ovh
# DOMAIN_NAME=192.168.1.10
#
# WRONG:
# DOMAIN_NAME=https://journiv.example.com
# DOMAIN_NAME=journiv.example.com/
# Protocol scheme for public URLs: http or https
# IMPORTANT: Set to "https" for production deployments, especially when behind a reverse proxy (Traefik, Caddy, Nginx, etc.).
# - Default: "http" (for local development only)
# - Production: "https" (REQUIRED when behind reverse proxy or using SSL/TLS)
#
# Journiv uses this to generate correct public redirect URLs:
# {DOMAIN_SCHEME}://{DOMAIN_NAME}/#/oidc-finish
# {DOMAIN_SCHEME}://{DOMAIN_NAME}/#/login?logout=success
#
DOMAIN_SCHEME=https
# ============================================================================
# DATABASE CONFIGURATION
# ============================================================================
# Database driver selection: "sqlite" (default) or "postgres"
DB_DRIVER=sqlite
# Primary database URL (defaults to SQLite)
# For SQLite: sqlite:////data/journiv.db
# For PostgreSQL: postgresql://user:password@host:5432/dbname
DATABASE_URL=sqlite:////data/journiv.db
# When DB_DRIVER=postgres, you must specify EITHER:
# Option 1: POSTGRES_PASSWORD (with optional components below)
# Option 2: DATABASE_URL with a PostgreSQL URL (postgresql:// or postgres://)
# NOT BOTH - specifying both will cause startup to fail
# PostgreSQL password (required when DB_DRIVER=postgres and using Option 1)
POSTGRES_PASSWORD=oUPnKDY3stjREg7ctUYWrbnn4wNs0Yy3
# (Optional) PostgreSQL components for Docker deployments (used with POSTGRES_PASSWORD)
# Defaults are used if not specified:
# POSTGRES_HOST=postgres
# POSTGRES_USER=journiv
# POSTGRES_DB=journiv_prod (production) or journiv_dev (development)
# POSTGRES_PORT=5432
# POSTGRES_HOST=
#POSTGRES_USER=journiv
#POSTGRES_DB=journiv_prod
#POSTGRES_PORT=5432
# ============================================================================
# APPLICATION SETTINGS
# ============================================================================
APP_NAME=Journiv
# APP_VERSION=latest # Pin to a specific version for production if needed
# DEBUG=false
ENVIRONMENT=production
APP_PORT=8198
# ============================================================================
# API CONFIGURATION
# ============================================================================
# API_V1_PREFIX=/api/v1
# Enable CORS only when the frontend runs on a different origin.
# ENABLE_CORS=false
# Required when ENABLE_CORS=true.
# Example:
# CORS_ORIGINS=https://journiv.example.com,https://myapp.example.net
# CORS_ORIGINS=
# ============================================================================
# SECURITY SETTINGS
# ============================================================================
# ALGORITHM=HS256
# ACCESS_TOKEN_EXPIRE_MINUTES=15
# REFRESH_TOKEN_EXPIRE_DAYS=7
# Disable user signup
# DISABLE_SIGNUP=false
# ============================================================================
# OIDC CONFIGURATION
# ============================================================================
# Enable OIDC login (Pocket-ID, Keycloak, Authentik, etc.)
OIDC_ENABLED=true
# OIDC provider issuer
# Example: https://id.example.com or https://auth.example.com/realms/default
OIDC_ISSUER=https://auth.domr.ovh/application/o/journiv
# OIDC client credentials
OIDC_CLIENT_ID="L1wmsh8BqoGlQRO6ZULMkTCRGOhu9M0L3Im7tiGd"
OIDC_CLIENT_SECRET=MPhUBLM8p4soNCfpfbp0pgAxoaqRHj36EvTzALVicoVy7Tf1UrVh2ckXJtciGYNscuQpQ78c8j8MXb1a1pn3bvOcnGERSYC2uT9s4AXhchD5yTKBBfFEz4l15OMZNqvG
# OIDC redirect URI
# Must match provider configuration EXACTLY.
# Example:
# OIDC_REDIRECT_URI=https://journiv.example.com/api/v1/auth/oidc/callback
OIDC_REDIRECT_URI=https://journal.domr.ovh/api/v1/auth/oidc/callback
# OIDC scopes to request
# OIDC_SCOPES="openid email profile"
# Automatically create users from OIDC claims
# OIDC_AUTO_PROVISION=true
# Disable SSL verification (ONLY for local development with self-signed certs)
# OIDC_DISABLE_SSL_VERIFY=false
# Allow OIDC over HTTP (INSECURE). Recommended only for advanced users in isolated homelabs.
# Default: false
# OIDC_ALLOW_INSECURE_PROD=false
# ============================================================================
# REDIS CONFIGURATION (Optional)
# ============================================================================
# Optional Redis URL for OIDC state caching, rate-limit persistence, and Celery
# Example: redis://localhost:6379/0
# REDIS_URL=
# ============================================================================
# CELERY CONFIGURATION (For Import/Export)
# ============================================================================
# Celery broker and result backend
# If not set, defaults to REDIS_URL
# Examples:
# CELERY_BROKER_URL=redis://localhost:6379/0
# CELERY_RESULT_BACKEND=redis://localhost:6379/0
# CELERY_BROKER_URL=
# CELERY_RESULT_BACKEND=
# Celery task serialization (default: json)
# CELERY_TASK_SERIALIZER=json
# CELERY_RESULT_SERIALIZER=json
# CELERY_TIMEZONE=UTC
# CELERY_ENABLE_UTC=true
# ============================================================================
# IMPORT/EXPORT CONFIGURATION
# ============================================================================
# Maximum file size for import/export operations (in MB)
# IMPORT_EXPORT_MAX_FILE_SIZE_MB=500
# Days to keep export files before automatic cleanup
# Set to -1 to disable automatic cleanup.
# EXPORT_CLEANUP_DAYS=7
# Directories for import/export operations
# IMPORT_TEMP_DIR=/data/imports/temp
# EXPORT_DIR=/data/exports
# ============================================================================
# INTEGRATIONS (IMMICH)
# ============================================================================
# Default Immich base URL for all users on the instance.
# If set, users can leave the Immich URL field empty in the UI.
# An Immich URL set per user in the settings screens can be used to override this.
# Example: IMMICH_BASE_URL=https://immich.example.com
# Example: IMMICH_BASE_URL=http://192.168.1.1:2283
# IMMICH_BASE_URL=
# ============================================================================
# CONTENT SECURITY POLICY (CSP)
# ============================================================================
# ENABLE_CSP=true
# ENABLE_HSTS=true
# ENABLE_CSP_REPORTING=true
# Where browsers should POST CSP violation reports
# CSP_REPORT_URI=/api/v1/security/csp-report
# ============================================================================
# FILE STORAGE
# ============================================================================
# Directory for user-uploaded files and media
# MEDIA_ROOT=/data/media
# Maximum allowed upload size in MB
# MAX_FILE_SIZE_MB=50
# Allowed media MIME types (comma-separated)
# ALLOWED_MEDIA_TYPES=image/jpeg,image/png,image/gif,image/webp,image/heic,video/mp4,video/avi,video/mov,video/webm,video/x-m4v,audio/mpeg,audio/wav,audio/ogg,audio/m4a,audio/aac
# Allowed file extensions (comma-separated)
# ALLOWED_FILE_EXTENSIONS=.jpg,.jpeg,.png,.gif,.webp,.heic,.mp4,.avi,.mov,.webm,.m4v,.mp3,.wav,.ogg,.m4a,.aac
# Signed media URL TTL in seconds (for images and general media)
# Default 5 minutes
# MEDIA_SIGNED_URL_TTL_SECONDS=300
# Signed video URL TTL in seconds (for video streaming, longer to support playback of large files)
# Default 20 mins
# MEDIA_SIGNED_URL_VIDEO_TTL_SECONDS=1200
# Signed thumbnail URL TTL in seconds (used for caching thumbnails)
# Default 24 hours
# MEDIA_THUMBNAIL_SIGNED_URL_TTL_SECONDS=86400
# Grace period in seconds for signed media URL expiration checks
# MEDIA_SIGNED_URL_GRACE_SECONDS=60
# ============================================================================
# LOGGING
# ============================================================================
# LOG_LEVEL=INFO
# LOG_FILE=
# LOG_DIR=/data/logs
# ============================================================================
# EXTERNAL API CONFIGURATION
# ============================================================================
# OpenWeather API keys for weather data fetching
# Get your free API key at: https://openweathermap.org/api
# Weather service is optional - if not configured, weather features will be disabled
# OPEN_WEATHER_API_KEY_25=your-openweather-2-5-api-key-here
# OPEN_WEATHER_API_KEY_30=your-openweather-3-0-api-key-here
# ============================================================================
# RATE LIMITING
# ============================================================================
# Enable rate limiting (protects login endpoints)
# RATE_LIMITING_ENABLED=true
# Backend for rate-limit storage (default: in-memory)
# Example for Redis: redis://localhost:6379/1
# RATE_LIMIT_STORAGE_URI=memory://

View File

@@ -1,5 +0,0 @@
journal.domr.ovh,
journiv.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8198
}

View File

@@ -1,191 +0,0 @@
# Journiv Production Docker Compose (PostgreSQL).
# Journiv recommends a PostgreSQL-based deployment over SQLite.
#
# Usage:
# docker compose up -d
#
# Required Environment Variables:
# SECRET_KEY - Generate with: python -c "import secrets; print(secrets.token_urlsafe(32))"
# DOMAIN_NAME - Needed when running in same-origin SPA mode (ENABLE_CORS=false).
# POSTGRES_PASSWORD - Must be provided in .env file.
x-base-env: &base-env
REDIS_URL: redis://valkey:6379/0
CELERY_BROKER_URL: redis://valkey:6379/0
CELERY_RESULT_BACKEND: redis://valkey:6379/0
DB_DRIVER: sqlite
# POSTGRES_HOST: postgres
x-celery-common: &celery-common
image: swalabtech/journiv-app:${APP_VERSION:-latest}
env_file: .env
volumes:
- /home/soenke/docker-data/journiv/app_data:/data
depends_on:
postgres:
condition: service_healthy
valkey:
condition: service_healthy
networks:
- backend
restart: unless-stopped
logging:
driver: "json-file"
options:
max-size: "50m"
max-file: "5"
deploy:
resources:
limits:
cpus: "1.0"
memory: 1g
reservations:
memory: 256m
x-app-common: &app-common
image: swalabtech/journiv-app:${APP_VERSION:-latest}
env_file: .env
volumes:
- /home/soenke/docker-data/journiv/app_data:/data
depends_on:
postgres:
condition: service_healthy
valkey:
condition: service_healthy
networks:
- backend
restart: unless-stopped
logging:
driver: "json-file"
options:
max-size: "50m"
max-file: "5"
x-celery-healthcheck: &celery-healthcheck
interval: 30s
timeout: 10s
retries: 5
start_period: 40s
services:
postgres:
image: postgres:18.1
container_name: journiv-postgres-db
environment:
- POSTGRES_USER=${POSTGRES_USER:-journiv}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # must provide a password in .env file
- POSTGRES_DB=${POSTGRES_DB:-journiv_prod}
volumes:
- /home/soenke/docker-data/journiv/postgres_data:/var/lib/postgresql
networks:
- backend
restart: unless-stopped
deploy:
resources:
limits:
cpus: "1.0"
memory: 1g
reservations:
memory: 256m
logging:
driver: "json-file"
options:
max-size: "50m"
max-file: "5"
healthcheck:
test:
[
"CMD-SHELL",
"pg_isready -U ${POSTGRES_USER:-journiv} -d ${POSTGRES_DB:-journiv_prod}",
]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
valkey:
# Journiv uses Valkey (a Redis-compatible store) for caching.
image: valkey/valkey:9.0-alpine
container_name: journiv-valkey-cache
restart: unless-stopped
volumes:
- /home/soenke/docker-data/journiv/valkey_data:/data
networks:
- backend
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
celery-worker:
<<: *celery-common
container_name: journiv-celery-worker
command: celery -A app.core.celery_app worker --loglevel=info
environment:
<<: *base-env
SERVICE_ROLE: celery-worker
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
healthcheck:
<<: *celery-healthcheck
celery-beat:
<<: *celery-common
container_name: journiv-celery-beat
command: celery -A app.core.celery_app beat --loglevel=info --scheduler redbeat.RedBeatScheduler --pidfile=/tmp/celerybeat.pid
environment:
<<: *base-env
SERVICE_ROLE: celery-beat
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
REDBEAT_REDIS_URL: redis://valkey:6379/2
healthcheck:
<<: *celery-healthcheck
app:
<<: *app-common
container_name: journiv-postgres-app
ports:
- "${APP_PORT:-8000}:8000"
environment:
<<: *base-env
SERVICE_ROLE: app
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
ENVIRONMENT: production
RATE_LIMIT_STORAGE_URI: redis://valkey:6379/1
networks:
- backend
- frontend
healthcheck:
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
deploy:
resources:
limits:
cpus: "2.0"
memory: 2g
reservations:
memory: 512m
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.journiv.tag.name: 'Work'
kuma.journiv.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.journiv.http.name: 'journiv'
kuma.journiv.http.url: 'https://journiv.domr.ovh'
kuma.journiv.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'
#volumes:
# app_data:
# postgres_data:
# valkey_data:
networks:
backend:
driver: bridge
frontend:
driver: bridge
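#
# First-run sketch (not part of the upstream compose file; a hedged example, values are placeholders):
#   python3 -c "import secrets; print(secrets.token_urlsafe(32))"   # -> SECRET_KEY in .env
#   openssl rand -base64 24                                         # -> POSTGRES_PASSWORD in .env
#   echo "DOMAIN_NAME=journiv.example.com" >> .env                  # plus SECRET_KEY=... and POSTGRES_PASSWORD=...
#   docker compose up -d
#   docker compose ps   # postgres, valkey, app and the celery containers should become "healthy"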

View File

@@ -1,5 +0,0 @@
kopia.domr.ovh,
kopia.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:51515
}

View File

@@ -30,14 +30,4 @@ services:
- /home/soenke/docker-data/kopia/repository:/repository
# Mount path for browsing mounted snapshots
- /home/soenke/docker-data/kopia/tmp:/tmp:shared
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.kopia.http.name: 'kopia'
kuma.kopia.http.url: 'https://kopia.domr.ovh/repo'
kuma.kopia.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "homelab", "value": "" }]'

View File

@@ -1,5 +0,0 @@
mealie.domr.ovh,
mealie.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:9925
}

View File

@@ -17,7 +17,7 @@ services:
environment:
POSTGRES_DB: mealie
POSTGRES_USER: mealie
POSTGRES_PASSWORD: "$hYx%uyO$IAUX3EhXvUtP$GMe4TLgoiLrBTN9nrXh&q8C0TWqp&ku%dEOUPT4GMZ"
POSTGRES_PASSWORD: $hYx%uyO$IAUX3EhXvUtP$GMe4TLgoiLrBTN9nrXh&q8C0TWqp&ku%dEOUPT4GMZ
restart: on-failure:5
mealie:
@@ -43,38 +43,18 @@ services:
BASE_URL: https://mealie.home.domroese.eu
DB_ENGINE: postgres
POSTGRES_USER: mealie
POSTGRES_PASSWORD: "$hYx%uyO$IAUX3EhXvUtP$GMe4TLgoiLrBTN9nrXh&q8C0TWqp&ku%dEOUPT4GMZ"
POSTGRES_PASSWORD: $hYx%uyO$IAUX3EhXvUtP$GMe4TLgoiLrBTN9nrXh&q8C0TWqp&ku%dEOUPT4GMZ
POSTGRES_SERVER: mealie-db
POSTGRES_PORT: 5432
POSTGRES_DB: mealie
SMTP_HOST: ${SYSTEM_EMAIL_SMTP_HOST}
SMTP_PORT: ${SYSTEM_EMAIL_SMTP_PORT}
SMTP_HOST: smtp.gmail.com
SMTP_PORT: 587
SMTP_FROM_NAME: Mealie
SMTP_AUTH_STRATEGY: TLS # Options: TLS, SSL, NONE
SMTP_FROM_EMAIL: ${SYSTEM_EMAIL_USER}
SMTP_USER: ${SYSTEM_EMAIL_USER}
SMTP_PASSWORD: ${SYSTEM_EMAIL_PASSSWORD}
OIDC_AUTH_ENABLED: true
OIDC_PROVIDER_NAME: auth.domr.ovh
OIDC_CONFIGURATION_URL: https://auth.home.domroese.eu/application/o/mealie/.well-known/openid-configuration
OIDC_CLIENT_ID: oVmVbL9Ehd1KAjSgAseAMZw4LHV6gmUfsFEf2Akp
OIDC_CLIENT_SECRET: WP2hs4qKjmEpKQabIvKCBgDwtlm534It526vs3Mg9lrBGgzswG9sCh0nw7ieW9y7D7OMRe0x2gkcHqcdP37LVMBgpR3f2rABSlOduhyZhPQKOUNBk79AQNxYr23Mdaud
OIDC_SIGNUP_ENABLED: true
OIDC_USER_GROUP: mealie-users
OIDC_ADMIN_GROUP: mealie-admins
OIDC_AUTO_REDIRECT: true # Optional: The login page will be bypassed and you will be sent directly to your Identity Provider.
OIDC_REMEMBER_ME: true # Optional: By setting this value to true, a session will be extended as if "Remember Me" was checked.
SMTP_FROM_EMAIL: Your-own-gmail-address
SMTP_USER: Your-own-gmail-address
SMTP_PASSWORD: Your-own-app-password
restart: on-failure:5
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.mealie.http.name: 'mealie'
kuma.mealie.http.url: 'https://mealie.domr.ovh/'
kuma.mealie.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
depends_on:
db:
condition: service_healthy

View File

@@ -1,5 +0,0 @@
memos.domr.ovh,
memos.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:5230
}

View File

@@ -1,22 +0,0 @@
services:
memos:
image: neosmemo/memos:stable
container_name: memos
restart: unless-stopped
ports:
- "5230:5230"
volumes:
- /home/soenke/docker-data/memos/data:/var/opt/memos
environment:
- MEMOS_MODE=prod
- MEMOS_PORT=5230
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.memos.http.name: 'memos'
kuma.memos.http.url: 'https://memos.domr.ovh/'
kuma.memos.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'

View File

@@ -1,123 +0,0 @@
Step-by-Step Instructions
📀 1. Clone the OS to the New SSD
We'll start by cloning your OS from /dev/nvme0 to the new SSD (/dev/nvme1).
a) Partition and Format the New SSD
First, partition the new SSD (/dev/nvme1) and create the root partition:
# Launch fdisk to partition the new SSD
sudo fdisk /dev/nvme1
Type g to create a GPT partition table (if it's not already).
Type n to create a new partition, use the entire disk.
Type w to write the partition table.
Then, format the new partition (/dev/nvme1p1):
sudo mkfs.ext4 /dev/nvme1p1
b) Mount the New SSD
Create a mount point and mount the new SSD:
sudo mkdir /mnt/ssd
sudo mount /dev/nvme1p1 /mnt/ssd
c) Clone the OS from /dev/nvme0 to /dev/nvme1
Now, we'll copy the entire root filesystem, excluding /home, to the new SSD:
sudo rsync -aAXv / --exclude=/home --exclude=/proc --exclude=/sys \
--exclude=/dev --exclude=/run --exclude=/mnt --exclude=/tmp \
/mnt/ssd/
###################################################################################################
This command copies the entire OS and system data but excludes /home, as we'll sync that separately later.
🧩 2. Prepare the New SSD to Boot
a) Mount Necessary Filesystems and Chroot
To make the new installation bootable, we need to bind mount critical filesystems and chroot into the new root.
for dir in dev proc sys; do
sudo mount --bind /$dir /mnt/ssd/$dir
done
If you are using UEFI, you might also need to mount the EFI partition:
sudo mount /dev/nvme0p1 /mnt/ssd/boot/efi # Adjust if needed
Now, enter the chroot environment:
sudo chroot /mnt/ssd
b) Update /etc/fstab
Make sure /etc/fstab points to the correct root filesystem, and remove any /home partition references.
blkid # Get the UUID of /dev/nvme1p1
nano /etc/fstab
Ensure the / entry is updated to use the new SSD, for example:
UUID=<new-uuid> / ext4 defaults 0 1
And remove or comment out any /home partition entry.
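If you prefer not to copy the UUID by hand, the following prints it ready to paste into /etc/fstab (a small sketch, using the same device naming as above):
# Print just the UUID of the new root partition
blkid -s UUID -o value /dev/nvme1p1
# Or list every filesystem together with its UUID
lsblk -f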
c) Install GRUB on the New SSD
Now install GRUB to make the system bootable from /dev/nvme1.
grub-install /dev/nvme1
update-grub
exit
🔄 3. Reboot from the New SSD
Reboot the system.
Go into BIOS/UEFI and set /dev/nvme1 as the primary boot drive.
Boot into the new SSD.
📁 4. Sync /home from /dev/sda (Old Home Drive)
Now, we'll sync the /home data from the old drive (/dev/sda) onto the new root partition.
a) Mount the Old /home Drive
First, create a mount point and mount /dev/sda1 (the old /home drive):
sudo mkdir -p /mnt/oldhome
sudo mount /dev/sda1 /mnt/oldhome
b) Sync /home to the New SSD
Now, copy the /home data:
sudo rsync -aAXv /mnt/oldhome/ /home/
Make sure /home is mounted correctly on /dev/nvme1p1 (the new SSD) by checking with df -h or lsblk.
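As an optional sanity check (a sketch using rsync's dry-run flag), repeat the sync with -n; it should list little or nothing left to transfer:
sudo rsync -aAXvn /mnt/oldhome/ /home/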
🧹 5. Cleanup (Optional)
Once you verify everything works as expected:
Remove /home entry from /etc/fstab if it exists.
You can either repurpose or wipe the old drives (/dev/nvme0 and /dev/sda).
Confirm everything is working fine and you're now booting from /dev/nvme1.
✅ Final Checks
Check disk usage:
df -h
Verify partitioning:
lsblk
Verify boot order in BIOS/UEFI to make sure you're booting from /dev/nvme1.
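A compact way to run these checks from a terminal (a sketch; the efibootmgr step applies to UEFI systems only):
# Root should come from the new SSD and its UUID should match /etc/fstab
findmnt -no SOURCE,UUID /
grep -v '^#' /etc/fstab
# On UEFI systems, confirm the active boot entry points at the new install
efibootmgr -v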
This approach ensures you move everything safely, with minimal risk of data loss.
Let me know if you encounter any issues or need further clarification!

Binary file not shown.

Binary file not shown.

View File

@@ -1 +0,0 @@
/var/run/mysqld/mysqld.sock

Binary file not shown.

Binary file not shown.

View File

View File

@@ -1,13 +0,0 @@
nas.domr.ovh,
nas.home.domroese.eu {
tls soenke@domroese.eu {
client_auth {
mode request
}
}
reverse_proxy https://192.168.1.194:5001 {
transport http {
tls_insecure_skip_verify # Disable TLS Verification, as we don't have a real certificate on the nas
}
}
}

View File

@@ -1,4 +0,0 @@
ntfy.domr.ovh {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8234
}

View File

@@ -1,37 +0,0 @@
services:
ntfy:
image: binwiederhier/ntfy
container_name: ntfy
command:
- serve
environment:
TZ: Europe/Berlin # optional: set desired timezone
NTFY_BASE_URL: http://ntfy.domr.ovh
NTFY_AUTH_DEFAULT_ACCESS: deny-all
NTFY_BEHIND_PROXY: true
NTFY_ATTACHMENT_CACHE_DIR: /var/lib/ntfy/attachments
NTFY_ENABLE_LOGIN: true
NTFY_UPSTREAM_BASE_URL: https://ntfy.domr.ovh
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.jenkins.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
kuma.ntfy.http.name: 'Notify'
kuma.ntfy.http.url: 'https://ntfy.domr.ovh'
volumes:
- /home/soenke/docker-data/ntfy/cache:/var/cache/ntfy
- /home/soenke/docker-data/ntfy/etc:/etc/ntfy
ports:
- 8234:80
healthcheck: # optional: remember to adapt the host:port to your environment
test: ["CMD-SHELL", "wget -q --tries=1 http://localhost:8234/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"]
interval: 60s
timeout: 10s
retries: 3
start_period: 40s
restart: unless-stopped

View File

@@ -1,5 +0,0 @@
chat.domr.ovh,
chat.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1180
}

View File

@@ -12,28 +12,10 @@ services:
image: "ghcr.io/open-webui/open-webui:main"
restart: always
container_name: open-webui
environment:
OAUTH_CLIENT_ID: b8Ktsot896DWYOMpSeKCyA30b0SfV5hW1qSpQtEh
OAUTH_CLIENT_SECRET: qLW9FNTRIhWpS51Ynx1gx0AiB0x0UGrs5FVukyBZyDNrNYc6NLdotHJq9U6giQJ48TnIHpE3mHvbCFvXnR8jpeV5o50CgbLXGXATHb0Om2K80TvFLSgAhbU8oIBvdSvj
OAUTH_PROVIDER_NAME: auth.domr.ovh
OPENID_PROVIDER_URL: https://auth.domr.ovh/application/o/openwebui/.well-known/openid-configuration
OPENID_REDIRECT_URI: https://chat.domr.ovh/oauth/oidc/callback
ENABLE_OAUTH_SIGNUP: 'true'
volumes:
- /home/soenke/docker-data/ollama/open-webui:/app/backend/data
extra_hosts:
- "host.docker.internal:host-gateway"
ports:
- 1180:8080
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.ollama.http.name: 'ollama'
kuma.ollama.http.url: 'https://chat.domr.ovh/'
kuma.ollama.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'

View File

@@ -1,5 +0,0 @@
omnitools.domr.ovh,
omnitools.home.domroese.eu:443 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:8579
}

View File

@@ -5,13 +5,3 @@ services:
restart: unless-stopped
ports:
- "8579:80"
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.omnitools.http.name: 'omnitools'
kuma.omnitools.http.url: 'https://omnitools.domr.ovh/'
kuma.omnitools.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'

View File

@@ -1,7 +0,0 @@
paperless.domr.ovh:443,
paperless.home.domroese.eu:443,
paperless.domr.ovh:80,
paperless.home.domroese.eu:80 {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:1000
}

View File

@@ -1,9 +1,9 @@
PAPERLESS_SECRET_KEY="UkvhWQ5frosxA%JKY5XGGtVABxjD87QKRqNX4uM&F8UsFh@MkQYZ@4bkCwJmazLKen346zbZA$q$DaKZB*wrF8g&8uyycigab67uTNGa5TirFA7UHSQF2qLG%fj7Kp$9"
PAPERLESS_URL=https://paperless.domr.ovh
PAPERLESS_ALLOWED_HOSTS=paperless.domr.ovh
PAPERLESS_CSRF_TRUSTED_ORIGINS=['https://paperless.domr.ovh']
PAPERLESS_CORS_ALLOWED_HOSTS=https://paperless.domr.ovh
PAPERLESS_URL=https://paperless.home.domroese.eu
PAPERLESS_ALLOWED_HOSTS=paperless.home.domroese.eu
PAPERLESS_CSRF_TRUSTED_ORIGINS=['https://paperless.home.domroese.eu']
PAPERLESS_CORS_ALLOWED_HOSTS=https://paperless.home.domroese.eu
PAPERLESS_ADMIN_MAIL=soenke@domroese.eu
PAPERLESS_CONSUMER_ENABLE_BARCODES=true
PAPERLESS_TIME_ZONE=Europe/Berlin

View File

@@ -30,6 +30,7 @@
# For more extensive installation and update instructions, refer to the
# documentation.
version: "3.4"
services:
broker:
image: docker.io/library/redis:7
@@ -77,34 +78,6 @@ services:
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
PAPERLESS_APPS: allauth.socialaccount.providers.openid_connect
PAPERLESS_SOCIALACCOUNT_PROVIDERS: >
{
"openid_connect": {
"APPS": [
{
"provider_id": "authentik",
"name": "auth.domr.ovh",
"client_id": "U9wsU9xPEU6oWEWO2jhiPr0OhUPcG3XvA8nGhPki",
"secret": "xFpnKcYaNcEuVReBWT6sGTprvUtYE0AT3lnHHshY8wKJlOw1NGsvtqIYqTgdp4VkTjLk3ZHr1Th4LaQYiciicYJe7LtpTa5qX3ICDBRJhs2HGX40sJMQ1LCnnEUrS9fZ",
"settings": {
"server_url": "https://auth.domr.ovh/application/o/paperless/.well-known/openid-configuration"
}
}
],
"OAUTH_PKCE_ENABLED": "True"
}
}
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.paperless.http.name: 'paperless'
kuma.paperless.http.url: 'https://paperless.domr.ovh/'
kuma.paperless.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'
gotenberg:
image: docker.io/gotenberg/gotenberg:7.10

281
penpot/docker-compose.yaml Normal file
View File

@@ -0,0 +1,281 @@
## Common flags:
# demo-users
# email-verification
# log-emails
# log-invitation-tokens
# login-with-github
# login-with-gitlab
# login-with-google
# login-with-ldap
# login-with-oidc
# login-with-password
# prepl-server
# registration
# secure-session-cookies
# smtp
# smtp-debug
# telemetry
# webhooks
##
## You can read more about all available flags and other
## environment variables here:
## https://help.penpot.app/technical-guide/configuration/#advanced-configuration
#
# WARNING: if you're exposing Penpot to the internet, you should remove the flags
# 'disable-secure-session-cookies' and 'disable-email-verification'
x-flags: &penpot-flags
PENPOT_FLAGS: disable-email-verification enable-smtp enable-prepl-server disable-secure-session-cookies
x-uri: &penpot-public-uri
PENPOT_PUBLIC_URI: http://penpot.home.domroese.eu
x-body-size: &penpot-http-body-size
# Max body size (30MiB); Used for plain requests, should never be
# greater than multi-part size
PENPOT_HTTP_SERVER_MAX_BODY_SIZE: 31457280
# Max multipart body size (350MiB)
PENPOT_HTTP_SERVER_MAX_MULTIPART_BODY_SIZE: 367001600
networks:
penpot:
volumes:
penpot_postgres_v15:
penpot_assets:
# penpot_traefik:
# penpot_minio:
services:
## Traefik service declaration example. Consider using it if you are going to expose
## penpot to the internet, or a different host than `localhost`.
# traefik:
# image: traefik:v3.3
# networks:
# - penpot
# command:
# - "--api.insecure=true"
# - "--entryPoints.web.address=:80"
# - "--providers.docker=true"
# - "--providers.docker.exposedbydefault=false"
# - "--entryPoints.websecure.address=:443"
# - "--certificatesresolvers.letsencrypt.acme.tlschallenge=true"
# - "--certificatesresolvers.letsencrypt.acme.email=<EMAIL_ADDRESS>"
# - "--certificatesresolvers.letsencrypt.acme.storage=/traefik/acme.json"
# volumes:
# - "penpot_traefik:/traefik"
# - "/var/run/docker.sock:/var/run/docker.sock"
# ports:
# - "80:80"
# - "443:443"
penpot-frontend:
image: "penpotapp/frontend:${PENPOT_VERSION:-latest}"
restart: always
ports:
- 9574:8080
volumes:
- penpot_assets:/opt/data/assets
depends_on:
- penpot-backend
- penpot-exporter
networks:
- penpot
# labels:
# - "traefik.enable=true"
# ## HTTPS: example of labels for the case where penpot will be exposed to the
# ## internet with HTTPS using traefik.
# - "traefik.http.routers.penpot-https.rule=Host(`<DOMAIN_NAME>`)"
# - "traefik.http.routers.penpot-https.entrypoints=websecure"
# - "traefik.http.routers.penpot-https.tls.certresolver=letsencrypt"
# - "traefik.http.routers.penpot-https.tls=true"
environment:
<< : [*penpot-flags, *penpot-http-body-size]
penpot-backend:
image: "penpotapp/backend:${PENPOT_VERSION:-latest}"
restart: always
volumes:
- penpot_assets:/opt/data/assets
depends_on:
penpot-postgres:
condition: service_healthy
penpot-redis:
condition: service_healthy
networks:
- penpot
## Configuration environment variables for the backend container.
environment:
<< : [*penpot-flags, *penpot-public-uri, *penpot-http-body-size]
## Penpot SECRET KEY. It serves as a master key from which other keys for subsystems
## (eg http sessions, or invitations) are derived.
##
## If you leave it commented, all created sessions and invitations will
## become invalid on container restart.
##
## If you are going to uncomment this, we recommend using a truly randomly generated
## 512 bits base64 encoded string here. You can generate one with:
##
## python3 -c "import secrets; print(secrets.token_urlsafe(64))"
# PENPOT_SECRET_KEY: my-insecure-key
## The PREPL host. Mainly used for external programmatic access to the penpot backend
## (example: admin). By default it will listen on `localhost` but if you are going to use
## the `admin`, you will need to uncomment this and set the host to `0.0.0.0`.
# PENPOT_PREPL_HOST: 0.0.0.0
## Database connection parameters. Don't touch them unless you are using custom
## postgresql connection parameters.
PENPOT_DATABASE_URI: postgresql://penpot-postgres/penpot
PENPOT_DATABASE_USERNAME: penpot
PENPOT_DATABASE_PASSWORD: penpot
## Redis is used for the websockets notifications. Don't touch unless the redis
## container has different parameters or different name.
PENPOT_REDIS_URI: redis://penpot-redis/0
## Default configuration for assets storage: using filesystem based with all files
## stored in a docker volume.
PENPOT_ASSETS_STORAGE_BACKEND: assets-fs
PENPOT_STORAGE_ASSETS_FS_DIRECTORY: /opt/data/assets
## Can also be configured to use an S3-compatible storage
## service like MinIO. Look below for the minio service setup.
# AWS_ACCESS_KEY_ID: <KEY_ID>
# AWS_SECRET_ACCESS_KEY: <ACCESS_KEY>
# PENPOT_ASSETS_STORAGE_BACKEND: assets-s3
# PENPOT_STORAGE_ASSETS_S3_ENDPOINT: http://penpot-minio:9000
# PENPOT_STORAGE_ASSETS_S3_BUCKET: <BUCKET_NAME>
## Telemetry. When enabled, a periodical process will send anonymous data about this
## instance. Telemetry data will enable us to learn how the application is used,
## based on real scenarios. If you want to help us, please leave it enabled. You can
## audit what data we send with the code available on github.
PENPOT_TELEMETRY_ENABLED: true
PENPOT_TELEMETRY_REFERER: compose
## Example SMTP/Email configuration. By default, emails are sent to the mailcatch
## service, but for production usage it is recommended to set up a real SMTP
## provider. Emails are used to confirm user registrations & invitations. Look below
## how the mailcatch service is configured.
PENPOT_SMTP_DEFAULT_FROM: no-reply@example.com
PENPOT_SMTP_DEFAULT_REPLY_TO: no-reply@example.com
PENPOT_SMTP_HOST: penpot-mailcatch
PENPOT_SMTP_PORT: 1025
PENPOT_SMTP_USERNAME:
PENPOT_SMTP_PASSWORD:
PENPOT_SMTP_TLS: false
PENPOT_SMTP_SSL: false
penpot-exporter:
image: "penpotapp/exporter:${PENPOT_VERSION:-latest}"
restart: always
depends_on:
penpot-redis:
condition: service_healthy
networks:
- penpot
environment:
# Don't touch it; this uses an internal docker network to
# communicate with the frontend.
PENPOT_PUBLIC_URI: http://penpot-frontend:8080
## Redis is used for the websockets notifications.
PENPOT_REDIS_URI: redis://penpot-redis/0
penpot-postgres:
image: "postgres:15"
restart: always
stop_signal: SIGINT
healthcheck:
test: ["CMD-SHELL", "pg_isready -U penpot"]
interval: 2s
timeout: 10s
retries: 5
start_period: 2s
volumes:
- penpot_postgres_v15:/var/lib/postgresql/data
networks:
- penpot
environment:
- POSTGRES_INITDB_ARGS=--data-checksums
- POSTGRES_DB=penpot
- POSTGRES_USER=penpot
- POSTGRES_PASSWORD=penpot
penpot-redis:
image: redis:7.2
restart: always
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
interval: 1s
timeout: 3s
retries: 5
start_period: 3s
networks:
- penpot
## A mailcatch service, used as a temporary SMTP server. You can access it over HTTP on
## port 1080 to read all the emails the penpot platform has sent. It should only be used as a
## temporary solution while no real SMTP provider is configured.
penpot-mailcatch:
image: sj26/mailcatcher:latest
restart: always
expose:
- '1025'
ports:
- "1080:1080"
networks:
- penpot
## Example configuration of MinIO (an S3-compatible object storage service). If you don't
## have a preference, just use the filesystem; this is here only for completeness.
# minio:
# image: "minio/minio:latest"
# command: minio server /mnt/data --console-address ":9001"
# restart: always
#
# volumes:
# - "penpot_minio:/mnt/data"
#
# environment:
# - MINIO_ROOT_USER=minioadmin
# - MINIO_ROOT_PASSWORD=minioadmin
#
# ports:
# - 9000:9000
# - 9001:9001
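#
# Bring-up sketch (not part of the upstream file; port 9574 is taken from the frontend mapping above):
#   python3 -c "import secrets; print(secrets.token_urlsafe(64))"   # value for PENPOT_SECRET_KEY, if you enable it
#   docker compose up -d
#   docker compose ps                                                # penpot-postgres / penpot-redis should report "healthy"
#   curl -fsS http://localhost:9574/ >/dev/null && echo "frontend reachable"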

View File

@@ -1,7 +0,0 @@
pihole.domr.ovh,
pihole.home.domroese.eu {
tls soenke@domroese.eu
reverse_proxy 192.168.1.65:2000
}

View File

@@ -1,33 +0,0 @@
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
pihole:
container_name: pihole
image: pihole/pihole:latest
# For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
ports:
- "53:53/tcp"
- "53:53/udp"
- "67:67/udp" # Only required if you are using Pi-hole as your DHCP server
- "2000:80/tcp"
environment:
TZ: 'Europe/Berlin'
WEBPASSWORD: 'Diavid9600'
# Volumes store your data between container upgrades
volumes:
- '/home/soenke/docker-data/pihole/etc-pihole:/etc/pihole'
- '/home/soenke/docker-data/pihole/etc-dnsmasq.d:/etc/dnsmasq.d'
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
cap_add:
- NET_ADMIN # Required if you are using Pi-hole as your DHCP server, else not needed
restart: unless-stopped
labels:
kuma.tools.tag.name: 'Tools'
kuma.tools.tag.color: '#FF9900'
kuma.homelab.tag.name: 'Homelab'
kuma.homelab.tag.color: '#FF9955'
kuma.organization.tag.name: 'Organization'
kuma.organization.tag.color: '#FF99AA'
kuma.pihole.http.name: 'pihole'
kuma.pihole.http.url: 'https://pihole.domr.ovh/'
kuma.pihole.http.tag_names: '[{"name": "tools", "value": "" }, {"name": "organization", "value": "" }]'

View File

@@ -1 +0,0 @@
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +0,0 @@
Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
Level3;4.2.2.1;4.2.2.2;;
Comodo;8.26.56.26;8.20.247.20;;
DNS.WATCH (DNSSEC);84.200.69.80;84.200.70.40;2001:1608:10:25:0:0:1c04:b12f;2001:1608:10:25:0:0:9249:d69b
Quad9 (filtered, DNSSEC);9.9.9.9;149.112.112.112;2620:fe::fe;2620:fe::9
Quad9 (unfiltered, no DNSSEC);9.9.9.10;149.112.112.10;2620:fe::10;2620:fe::fe:10
Quad9 (filtered, ECS, DNSSEC);9.9.9.11;149.112.112.11;2620:fe::11;2620:fe::fe:11
Cloudflare (DNSSEC);1.1.1.1;1.0.0.1;2606:4700:4700::1111;2606:4700:4700::1001

View File

@@ -1,106 +0,0 @@
# Pi-hole: A black hole for Internet advertisements
# (c) 2025 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Dnsmasq config for Pi-hole's FTLDNS
#
##################################################################################
# #
# FILE AUTOMATICALLY POPULATED BY PI-HOLE #
# ANY CHANGES MADE TO THIS FILE WILL BE LOST WHEN THE CONFIGURATION CHANGES #
# #
# IF YOU WISH TO CHANGE ANY OF THESE VALUES, CHANGE THEM IN #
# /etc/pihole/pihole.toml #
# and restart pihole-FTL #
# #
# ANY OTHER CHANGES SHOULD BE MADE IN A SEPARATE CONFIG FILE #
# WITHIN /etc/dnsmasq.d/yourname.conf #
# (make sure misc.etc_dnsmasq_d is set to true in /etc/pihole/pihole.toml) #
# #
# Last updated: 2025-03-07 13:21:22 CET #
# by FTL version v6.0.2 #
# #
##################################################################################
hostsdir=/etc/pihole/hosts
# Don't read /etc/resolv.conf. Get upstream servers only from the configuration
no-resolv
# DNS port to be used
port=53
# List of upstream DNS server
server=8.8.8.8
server=8.8.4.4
# Set the size of dnsmasq's cache. The default is 150 names. Setting the cache
# size to zero disables caching. Note: huge cache size impacts performance
cache-size=10000
# Return answers to DNS queries from /etc/hosts and interface-name and
# dynamic-host which depend on the interface over which the query was
# received. If a name has more than one address associated with it, and
# at least one of those addresses is on the same subnet as the interface
# to which the query was sent, then return only the address(es) on that
# subnet and return all the available addresses otherwise.
localise-queries
# Enable query logging
log-queries
log-async
# Specify the log file to use
# We set this even if logging is disabled to store warnings
# and errors in this file. This is useful for debugging.
log-facility=/var/log/pihole/pihole.log
# Use stale cache entries for a given number of seconds to optimize cache utilization
# Setting the time to zero will serve stale cache data regardless how long it has expired.
use-stale-cache=3600
# Listen on one interface
interface=eth0
# DNS domain for both the DNS and DHCP server
# This DNS domain in purely local. FTL may answer queries from
# /etc/hosts or DHCP but should never forward queries on that
# domain to any upstream servers
domain=lan
local=/lan/
# RFC 6761: Caching DNS servers SHOULD recognize
# test, localhost, invalid
# names as special and SHOULD NOT attempt to look up NS records for them, or
# otherwise query authoritative DNS servers in an attempt to resolve these
# names.
server=/test/
server=/localhost/
server=/invalid/
# The same RFC requests something similar for
# 10.in-addr.arpa. 21.172.in-addr.arpa. 27.172.in-addr.arpa.
# 16.172.in-addr.arpa. 22.172.in-addr.arpa. 28.172.in-addr.arpa.
# 17.172.in-addr.arpa. 23.172.in-addr.arpa. 29.172.in-addr.arpa.
# 18.172.in-addr.arpa. 24.172.in-addr.arpa. 30.172.in-addr.arpa.
# 19.172.in-addr.arpa. 25.172.in-addr.arpa. 31.172.in-addr.arpa.
# 20.172.in-addr.arpa. 26.172.in-addr.arpa. 168.192.in-addr.arpa.
# Pi-hole implements this via the dnsmasq option "bogus-priv" above
# (if enabled!) as this option also covers IPv6.
# OpenWRT furthermore blocks bind, local, onion domains
# see https://git.openwrt.org/?p=openwrt/openwrt.git;a=blob_plain;f=package/network/services/dnsmasq/files/rfc6761.conf;hb=HEAD
# and https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
# We do not include the ".local" rule ourselves, see https://github.com/pi-hole/pi-hole/pull/4282#discussion_r689112972
server=/bind/
server=/onion/
# Cache all DNS records
cache-rr=ANY
# RFC 8482: Providing Minimal-Sized Responses to DNS Queries That Have QTYPE=ANY
# Filters replies to queries for type ANY. Everything other than A, AAAA, MX and CNAME
# records are removed. Since ANY queries with forged source addresses can be used in DNS amplification attacks
# replies to ANY queries can be large) this defangs such attacks, whilst still supporting the
# one remaining possible use of ANY queries. See RFC 8482 para 4.3 for details.
filter-rr=ANY

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More