Compare commits

...

29 Commits

Author SHA1 Message Date
ace326c0ba Updated docker-compose files 2025-07-27 10:30:08 +02:00
493d5dde42 updated docker compose files 2024-11-05 13:48:34 +01:00
72d4b93fdf updated nextcloud 2024-11-04 11:44:40 +01:00
39f4231c09 gitea update 2024-11-04 10:42:13 +01:00
f5270aa908 port 2024-11-04 10:33:06 +01:00
8531665064 changed back to old port 2024-11-04 10:30:25 +01:00
d44bd646b8 updated gitea to latest version 2024-11-04 10:28:06 +01:00
ca619489ec Added Immich config 2024-11-02 19:37:50 +01:00
0aa1c1f999 Added paperless set up 2024-05-19 13:27:42 +02:00
53ea440d49 Added plex 2024-02-18 18:52:26 +01:00
c273decbc1 Added restart to docker files 2024-02-16 08:52:48 +01:00
2a7fbe790d update docker compose for traefik set-up 2024-02-14 20:44:20 +01:00
25db3542eb created volume for letsencrypt 2024-02-13 19:14:28 +01:00
1b4cfc5291 Move to Traefik 2024-02-13 19:10:53 +01:00
3fdd3e43f8 Updated compose files 2024-02-11 11:48:54 +01:00
dd279b13ea new things-board docker file 2023-08-22 13:15:48 +02:00
8da06841cf Added config files 2023-08-16 11:37:33 +02:00
ff7902f1c7 Merge branch 'master' of https://montana2000.ddns.net/gitea/nils/docker-compose 2023-08-16 11:15:03 +02:00
0e7ed68cec add new files for monitoring 2023-08-16 11:15:02 +02:00
99aa576b15 smaller updates 2022-11-06 17:55:17 +01:00
641928adbf set fhem to network mode host + added firewall rules 2022-11-06 08:49:58 +01:00
345d9a340c added node red restart and host network mode 2022-11-05 17:00:50 +01:00
d4fe5cf645 some first n5100 server set-ups added 2022-10-22 16:32:45 +02:00
74497faa6a Updated versions for docker files 2022-07-19 20:31:42 +02:00
8a318638ed Smaller updates and extions to docker compose files 2022-07-03 19:01:34 +02:00
16bc5d22dc update nextcloud 2021-11-08 18:22:50 +01:00
1b11399b19 rework of docker containers 2021-11-07 10:13:25 +01:00
d27ac60619 added portainer 2021-11-07 09:40:15 +01:00
529f7767d6 updated container versions 2021-11-07 09:10:43 +01:00
36 changed files with 10588 additions and 54 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/pihole/etc-pihole

34
dashy/docker-compose.yml Normal file
View File

@@ -0,0 +1,34 @@
version: "3.8"
services:
  dashy:
    # To build from source, replace 'image: lissy93/dashy' with 'build: .'
    # build: .
    image: lissy93/dashy
    container_name: dashy
    # Pass in your config file below, by specifying the path on your host machine
    # volumes:
    #   - /root/my-config.yml:/app/public/conf.yml
    ports:
      # Quoted so the host:container mapping is always parsed as a string
      # (unquoted digit:digit values can hit YAML 1.1 sexagesimal-int parsing).
      - "4000:80"
    # Set any environmental variables
    environment:
      - NODE_ENV=production
    # Specify your user ID and group ID. You can find this by running `id -u` and `id -g`
    # - UID=1000
    # - GID=1000
    # Specify restart policy
    restart: unless-stopped
    # Configure healthchecks
    healthcheck:
      test: ['CMD', 'node', '/app/services/healthcheck']
      interval: 1m30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - nginx_network
networks:
  nginx_network:
    driver: bridge

View File

@@ -2,7 +2,7 @@ version: "2.1"
services: services:
swag: swag:
image: ghcr.io/linuxserver/swag image: linuxserver/swag:1.31.0
container_name: swag container_name: swag
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
@@ -15,16 +15,16 @@ services:
- VALIDATION=http - VALIDATION=http
- EMAIL=nilsgrunwald@msn.com - EMAIL=nilsgrunwald@msn.com
volumes: volumes:
- swag_conf:/config - swag_conf_update:/config
ports: ports:
- 443:443 - 444:443
- 80:80 - 81:80
restart: unless-stopped restart: unless-stopped
networks: networks:
- nginx_network - nginx_network
volumes: volumes:
swag_conf: swag_conf_update:
networks: networks:

View File

@@ -7,19 +7,18 @@ services:
fhem: fhem:
image: fhem/fhem:latest image: fhem/fhem:latest
container_name: fhem container_name: fhem
restart: always restart: unless-stopped
ports:
- "8083:8083"
- "7072:7072"
- "8090:8090"
volumes: volumes:
- fhem_opt:/opt/fhem/ - fhem_opt:/opt/fhem/
environment: environment:
TELNETPORT: 7072 TELNETPORT: 7072
TZ: Europe/Berlin TZ: Europe/Berlin
networks: network_mode: host
- nginx_network labels:
- "traefik.enable=true"
- "traefik.http.routers.fhem.rule=Host(`fhem.montana2000.freeddns.org`)"
- "traefik.http.routers.fhem.middlewares=fhem-auth"
- "traefik.http.middlewares.fhem-auth.basicauth.users=nils:$$apr1$$JDBG7p8k$$LB8y6/aKcNQ/ybLz7LXjY."
- "traefik.http.services.fhem-service.loadbalancer.server.port=8083"
networks:
nginx_network:
driver: bridge

View File

@@ -1,35 +1,49 @@
version: '2' version: "3"
networks:
traefik_web:
external: true
services: services:
web: server:
image: gitea/gitea:1.12.4 image: gitea/gitea:1.22.3
container_name: gitea container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=db:3306
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
restart: always
networks:
- traefik_web
volumes: volumes:
- gitea_data:/data - gitea_data:/data
ports: ports:
- "3000:3000" - "3000:3000"
- "22:22" - "222:22"
depends_on: depends_on:
- db - db
restart: always labels:
networks: - "traefik.enable=true"
- nginx_network - "traefik.http.routers.gitea.rule=Host(`gitea.montana2000.freeddns.org`)"
- "traefik.http.services.gitea-service.loadbalancer.server.port=3000"
db: db:
image: mariadb:10 image: mysql:8
volumes:
- gitea_mariadb:/var/lib/mysql
restart: always restart: always
environment: environment:
- MYSQL_ROOT_PASSWORD=inginf95 - MYSQL_ROOT_PASSWORD=gitea
- MYSQL_DATABASE=gitea
- MYSQL_USER=gitea - MYSQL_USER=gitea
- MYSQL_PASSWORD=inginf95 - MYSQL_PASSWORD=gitea
- MYSQL_DATABASE=gitea
networks: networks:
- nginx_network - traefik_web
volumes:
- gitea_mysql:/var/lib/mysql
volumes: volumes:
gitea_data: gitea_data:
gitea_mariadb: gitea_mysql:
networks:
nginx_network:
driver: bridge

View File

@@ -0,0 +1,32 @@
version: '2'
services:
  grafana:
    # NOTE(review): image is unpinned (implicit :latest) — consider pinning a version.
    image: grafana/grafana
    container_name: grafana
    restart: always
    ports:
      # NOTE(review): host port 4000 is also used by dashy elsewhere in this repo —
      # confirm the two stacks never run on the same host.
      - "4000:3000"
    networks:
      - monitoring
    volumes:
      - grafana-volume:/var/lib/grafana
  influxdb:
    # NOTE(review): image is unpinned (implicit :latest) — consider pinning a version.
    image: influxdb
    container_name: influxdb
    restart: always
    ports:
      - "8086:8086"
    networks:
      - monitoring
    volumes:
      - influxdb-volume:/var/lib/influxdb
networks:
  monitoring:
volumes:
  grafana-volume:
  influxdb-volume:

1
grafana-tick/env.grafana Normal file
View File

@@ -0,0 +1 @@
GF_INSTALL_PLUGINS=grafana-clock-panel,briangann-gauge-panel,natel-plotly-panel,grafana-simple-json-datasource

128
grafana-tick/telegraph.conf Normal file
View File

@@ -0,0 +1,128 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 10000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://influxdb:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
# Read metrics about disk usage by mount point
[[inputs.disk]]
# By default, telegraf gather stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points=["/"]
# Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
# Uncomment the following line if you do not need disk serial numbers.
# skip_serial_number = true
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
###############################################################################
# SERVICE INPUTS #
###############################################################################

View File

@@ -0,0 +1,11 @@
version: '3'
services:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      # Placeholder — replace /PATH_TO_YOUR_CONFIG with the real host config directory
      # before deploying.
      - /PATH_TO_YOUR_CONFIG:/config
      # Mirror the host's local time inside the container (read-only).
      - /etc/localtime:/etc/localtime:ro
    restart: unless-stopped
    # privileged + host networking: presumably required for device discovery and
    # USB/Zigbee access — TODO confirm both are actually needed on this host.
    privileged: true
    network_mode: host

21
immich/.env Normal file
View File

@@ -0,0 +1,21 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/media/immich/library
# The location where your database files are stored
DB_DATA_LOCATION=/media/immich/postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
# NOTE(review): this password is committed to version control in plain text —
# rotate it and inject it from an untracked env file or secret store instead.
DB_PASSWORD=nueiwnednecncuwdhewiudhewiduhewiduh
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

109
immich/docker-compose.yml Normal file
View File

@@ -0,0 +1,109 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
name: immich
networks:
traefik_web:
external: true
immich:
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
networks:
- traefik_web
- immich
labels:
- "traefik.enable=true"
- "traefik.http.routers.immich.rule=Host(`immich.montana2000.freeddns.org`)"
- "traefik.http.services.immich-service.loadbalancer.server.port=2283"
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
networks:
- immich
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
healthcheck:
test: redis-cli ping || exit 1
restart: always
networks:
- immich
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
#start_interval: 30s
start_period: 5m
networks:
- immich
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
restart: always
volumes:
model-cache:

View File

@@ -2,7 +2,7 @@
version: '2' version: '2'
services: services:
mosquitto: mosquitto:
image: eclipse-mosquitto image: eclipse-mosquitto:2.0
expose: expose:
- "1884" - "1884"
- "9001" - "9001"
@@ -10,13 +10,16 @@ services:
- "1884:1884" - "1884:1884"
- "9001:9001" - "9001:9001"
volumes: volumes:
- D:\dev\docker\docker-compose\mosquitto\mosquitto.conf:/mosquitto/config/mosquitto.conf - ./mosquitto.conf:/mosquitto/config/mosquitto.conf
- mosquitto_data:/mosquitto/data - mosquitto_data:/mosquitto/data
- mosquitto_log:/mosquitto/log - mosquitto_log:/mosquitto/log
restart: always restart: unless-stopped
volumes: volumes:
mosquitto_data: mosquitto_data:
mosquitto_log: mosquitto_log:

View File

@@ -6,7 +6,7 @@ volumes:
services: services:
nextcloud_db: nextcloud_db:
image: mariadb image: mariadb:10.6
restart: always restart: always
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
volumes: volumes:
@@ -17,14 +17,15 @@ services:
- MYSQL_DATABASE=nextcloud - MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud - MYSQL_USER=nextcloud
networks: networks:
- nginx_network - traefik_web
app: app:
image: nextcloud image: nextcloud:30.0.1
container_name: nextcloud container_name: nextcloud
restart: always restart: always
ports: ports:
- 8080:80 - 8082:80
links: links:
- nextcloud_db - nextcloud_db
volumes: volumes:
@@ -35,8 +36,11 @@ services:
- MYSQL_USER=nextcloud - MYSQL_USER=nextcloud
- MYSQL_HOST=nextcloud_db - MYSQL_HOST=nextcloud_db
networks: networks:
- nginx_network - traefik_web
labels:
- "traefik.enable=true"
- "traefik.http.routers.nextcloud.rule=Host(`nextcloud.montana2000.freeddns.org`)"
networks: networks:
nginx_network: traefik_web:
driver: bridge external: true

22
nginx/docker-compose.yml Normal file
View File

@@ -0,0 +1,22 @@
services:
  nginx:
    image: nginx:1.27
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.nginx.rule=Host(`montana2000.freeddns.org`)"
      - "traefik.http.routers.nginx.middlewares=nginx-auth"
      # $$ escapes $ for Compose variable interpolation. NOTE(review): the htpasswd
      # hash is committed to version control — consider rotating the credential.
      - "traefik.http.middlewares.nginx-auth.basicauth.users=nils:$$apr1$$JDBG7p8k$$LB8y6/aKcNQ/ybLz7LXjY."
    networks:
      - traefik_web
    volumes:
      # Static site content served from a named volume.
      - www-data:/usr/share/nginx/html/
    restart: always
volumes:
  www-data:
networks:
  traefik_web:
    external: true

View File

@@ -0,0 +1,30 @@
################################################################################
# Node-RED Stack or Compose
################################################################################
# docker stack deploy node-red --compose-file docker-compose-node-red.yml
# docker-compose -f docker-compose-node-red.yml -p myNoderedProject up
################################################################################
version: "3.7"
services:
  node-red:
    image: nodered/node-red:latest
    environment:
      - TZ=Europe/Amsterdam
    volumes:
      - node-red-data:/data
    restart: unless-stopped
    # NOTE(review): published ports are ignored when network_mode is "host" —
    # the mappings below are dead configuration; the container listens directly
    # on the host's 1880.
    ports:
      - "8080:8080"
      - "1880:1880"
    network_mode: host
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.nodered.rule=Host(`nodered.montana2000.freeddns.org`)"
      - "traefik.http.routers.nodered.middlewares=nodered-auth"
      # $$ escapes $ for Compose interpolation; htpasswd hash is committed — rotate.
      - "traefik.http.middlewares.nodered-auth.basicauth.users=nils:$$apr1$$JDBG7p8k$$LB8y6/aKcNQ/ybLz7LXjY."
      - "traefik.http.services.nodered-service.loadbalancer.server.port=1880"
volumes:
  node-red-data:

View File

@@ -0,0 +1,40 @@
version: '2.4'
services:
  octoprint:
    image: octoprint/octoprint:latest
    restart: unless-stopped
    ports:
      - "8088:80"
    # devices:
    # use `python -m serial.tools.miniterm` to see what the name is of the printer, this requires pyserial
    #   - /dev/ttyACM0:/dev/ttyACM0
    #   - /dev/video0:/dev/video0
    volumes:
      - octoprint:/octoprint
    # uncomment the lines below to ensure camera streaming is enabled when
    # you add a video device
    #environment:
    #  - ENABLE_MJPG_STREAMER=true
  ####
  # uncomment if you wish to edit the configuration files of octoprint
  # refer to docs on configuration editing for more information
  ####
  #config-editor:
  #  image: linuxserver/code-server
  #  ports:
  #    - 8443:8443
  #  depends_on:
  #    - octoprint
  #  restart: unless-stopped
  #  environment:
  #    - PUID=0
  #    - GUID=0   # NOTE(review): linuxserver images use PGID, not GUID — fix if enabled
  #    - TZ=America/Chicago
  #  volumes:
  #    - octoprint:/octoprint
volumes:
  octoprint:

View File

@@ -9,7 +9,7 @@ volumes:
services: services:
openhab: openhab:
image: "openhab/openhab:3.0.1" image: "openhab/openhab:3.0.1"
container_name: openhub container_name: openhab
restart: always restart: always
ports: ports:
- "88:8080" - "88:8080"

1
paperless/.env Normal file
View File

@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=paperless

View File

@@ -0,0 +1,47 @@
# The UID and GID of the user used to run paperless in the container. Set this
# to your UID and GID on the host so that you have write access to the
# consumption directory.
USERMAP_UID=1003
USERMAP_GID=1003
# Additional languages to install for text recognition, separated by a
# whitespace. Note that this is
# different from PAPERLESS_OCR_LANGUAGE (default=eng), which defines the
# language used for OCR.
# The container installs English, German, Italian, Spanish and French by
# default.
# See https://packages.debian.org/search?keywords=tesseract-ocr-&searchon=names&suite=buster
# for available languages.
PAPERLESS_OCR_LANGUAGES=deu
###############################################################################
# Paperless-specific settings #
###############################################################################
# All settings defined in the paperless.conf.example can be used here. The
# Docker setup does not use the configuration file.
# A few commonly adjusted settings are provided below.
# This is required if you will be exposing Paperless-ngx on a public domain
# (if doing so please consider security measures such as reverse proxy)
PAPERLESS_URL=https://paperless.montana2000.freeddns.org
# Adjust this key if you plan to make paperless available publicly. It should
# be a very long sequence of random characters. You don't need to remember it.
PAPERLESS_SECRET_KEY=JustSomeNonesenseSecretKeyIPutInHereForBeingSafe
# Use this variable to set a timezone for the Paperless Docker containers. If not specified, defaults to UTC.
PAPERLESS_TIME_ZONE=Europe/Berlin
# The default language to use for OCR. Set this to the language most of your
# documents are written in.
PAPERLESS_OCR_LANGUAGE=deu
# Set if accessing paperless via a domain subpath e.g. https://domain.com/PATHPREFIX and using a reverse-proxy like traefik or nginx
#PAPERLESS_FORCE_SCRIPT_NAME=/PATHPREFIX
#PAPERLESS_STATIC_URL=/PATHPREFIX/static/ # trailing slash required
PAPERLESS_CONSUMER_ASN_BARCODE_PREFIX=ASN
PAPERLESS_CONSUMER_ENABLE_ASN_BARCODE=true
PAPERLESS_CONSUMER_ENABLE_BARCODES=true
PAPERLESS_CONSUMER_BARCODE_SCANNER=ZXING

View File

@@ -0,0 +1,111 @@
# Docker Compose file for running paperless from the docker container registry.
# This file contains everything paperless needs to run.
# Paperless supports amd64, arm and arm64 hardware.
#
# All compose files of paperless configure paperless in the following way:
#
# - Paperless is (re)started on system boot, if it was running before shutdown.
# - Docker volumes for storing data are managed by Docker.
# - Folders for importing and exporting files are created in the same directory
# as this file and mounted to the correct folders inside the container.
# - Paperless listens on port 8000.
#
# In addition to that, this Docker Compose file adds the following optional
# configurations:
#
# - Instead of SQLite (default), PostgreSQL is used as the database server.
# - Apache Tika and Gotenberg servers are started with paperless and paperless
# is configured to use these services. These provide support for consuming
# Office documents (Word, Excel, Power Point and their LibreOffice counter-
# parts.
#
# To install and update paperless with this file, do the following:
#
# - Copy this file as 'docker-compose.yml' and the files 'docker-compose.env'
# and '.env' into a folder.
# - Run 'docker compose pull'.
# - Run 'docker compose run --rm webserver createsuperuser' to create a user.
# - Run 'docker compose up -d'.
#
# For more extensive installation and update instructions, refer to the
# documentation.
version: "3.4"
services:
broker:
image: docker.io/library/redis:7
restart: unless-stopped
volumes:
- redisdata:/data
networks:
- traefik_web
db:
image: docker.io/library/postgres:16
restart: unless-stopped
volumes:
- pgdata:/var/lib/postgresql/data
environment:
POSTGRES_DB: paperless
POSTGRES_USER: paperless
POSTGRES_PASSWORD: paperless
networks:
- traefik_web
webserver:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
restart: unless-stopped
depends_on:
- db
- broker
- gotenberg
- tika
ports:
- "8010:8000"
volumes:
- data:/usr/src/paperless/data
- media:/usr/src/paperless/media
- /home/nils/paperless/export:/usr/src/paperless/export
- /home/nils/paperless/consume:/usr/src/paperless/consume
env_file: docker-compose.env
environment:
PAPERLESS_REDIS: redis://broker:6379
PAPERLESS_DBHOST: db
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
labels:
- "traefik.enable=true"
- "traefik.http.routers.paperless.rule=Host(`paperless.montana2000.freeddns.org`)"
- "traefik.docker.network=proxy"
networks:
- traefik_web
gotenberg:
image: docker.io/gotenberg/gotenberg:7.10
restart: unless-stopped
# The gotenberg chromium route is used to convert .eml files. We do not
# want to allow external content like tracking pixels or even javascript.
command:
- "gotenberg"
- "--chromium-disable-javascript=true"
- "--chromium-allow-list=file:///tmp/.*"
networks:
- traefik_web
tika:
image: ghcr.io/paperless-ngx/tika:latest
restart: unless-stopped
networks:
- traefik_web
volumes:
data:
media:
pgdata:
redisdata:
networks:
traefik_web:
external: true

24
pihole/docker-compose.yml Normal file
View File

@@ -0,0 +1,24 @@
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
  pihole:
    container_name: pihole
    hostname: Pihole
    image: pihole/pihole:latest
    # NOTE(review): network_mode referencing a named network ("vlanPihole") relies on a
    # pre-existing Docker network; the Compose spec only documents host/none/service:
    # values here — verify this resolves as intended, or use a `networks:` entry.
    network_mode: vlanPihole
    environment:
      # Set the appropriate timezone for your location (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g:
      TZ: 'Europe/Berlin'
      # Set a password to access the web interface. Not setting one will result in a random password being assigned
      # NOTE(review): plain-text admin password committed to version control — rotate it
      # and inject it via an untracked env file instead.
      FTLCONF_webserver_api_password: 'inginf95'
      # NOTE(review): WEB_PORT is the legacy v5-era variable; alongside the v6-style
      # FTLCONF_* setting above it may be ignored — confirm against the docker-pi-hole
      # docs (v6 uses FTLCONF_webserver_port).
      WEB_PORT: '9090'
    # Volumes store your data between container upgrades
    volumes:
      # For persisting Pi-hole's databases and common configuration file
      - './etc-pihole:/etc/pihole'
      # Uncomment the below if you have custom dnsmasq config files that you want to persist. Not needed for most starting fresh with Pi-hole v6. If you're upgrading from v5 you and have used this directory before, you should keep it enabled for the first v6 container start to allow for a complete migration. It can be removed afterwards
      #- './etc-dnsmasq.d:/etc/dnsmasq.d'
    #cap_add:
      # See https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
      # Required if you are using Pi-hole as your DHCP server, else not needed
      #- NET_ADMIN
    restart: unless-stopped

View File

@@ -0,0 +1,37 @@
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
pihole:
container_name: pihole
image: pihole/pihole:latest
ports:
# DNS Ports
- "53:53/tcp"
- "53:53/udp"
# Default HTTP Port
- "9090:80/tcp"
# Default HTTPs Port. FTL will generate a self-signed certificate
#- "9043:443/tcp"
# Uncomment the below if using Pi-hole as your DHCP Server
#- "67:67/udp"
networks:
- pihole_network
environment:
# Set the appropriate timezone for your location (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), e.g:
TZ: 'Europe/Berlin'
# Set a password to access the web interface. Not setting one will result in a random password being assigned
FTLCONF_webserver_api_password: 'inginf95'
# Volumes store your data between container upgrades
volumes:
# For persisting Pi-hole's databases and common configuration file
- './etc-pihole:/etc/pihole'
# Uncomment the below if you have custom dnsmasq config files that you want to persist. Not needed for most starting fresh with Pi-hole v6. If you're upgrading from v5 you and have used this directory before, you should keep it enabled for the first v6 container start to allow for a complete migration. It can be removed afterwards
#- './etc-dnsmasq.d:/etc/dnsmasq.d'
#cap_add:
# See https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
# Required if you are using Pi-hole as your DHCP server, else not needed
#- NET_ADMIN
restart: unless-stopped
networks:
pihole_network:
driver: bridge

38
plex/docker-compose.yaml Normal file
View File

@@ -0,0 +1,38 @@
version: "2.1"
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    environment:
      - PUID=1000
      - PGID=1000
      # Fixed: IANA time zone identifiers are case-sensitive — "EUROPE/BERLIN"
      # is not a valid zone and silently falls back to a default.
      - TZ=Europe/Berlin
      - VERSION=docker
      # NOTE(review): Plex claim tokens are short-lived and single-use; this
      # committed value is stale — generate a fresh one at deploy time.
      - PLEX_CLAIM=claim-J9FARYcKeGSzVPU7y5Fh
    volumes:
      - plex_config:/config
      - /media/plex/tv-shows:/tv-shows
      - /media/plex/movies:/movies
      - /media/plex/music:/music
      - /media/plex/cartoons:/cartoons
      - /media/plex/comedy:/comedy
    restart: unless-stopped
    # NOTE(review): with network_mode: host below, Docker ignores these published
    # ports — the container binds directly on the host.
    ports:
      # Plex DLNA Server
      - "1901:1900/udp"
      # GDM network discovery
      - "32410:32410/udp"
      - "32412:32412/udp"
      - "32413:32413/udp"
      - "32414:32414/udp"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.plex.rule=Host(`plex.montana2000.freeddns.org`)"
      - "traefik.http.routers.plex.service=plex"
      - "traefik.http.services.plex.loadbalancer.server.port=32400"
      - "traefik.docker.network=proxy"
    network_mode: host
volumes:
  plex_config:

View File

@@ -0,0 +1,21 @@
version: '2'
services:
  portainer:
    image: portainer/portainer-ce:latest
    restart: always
    networks:
      - nginx_network
    ports:
      - "8000:8000"
      - "9000:9000"
    volumes:
      # Mounting the Docker socket grants this container full control of the
      # Docker daemon — required by Portainer, but treat it as privileged.
      - /var/run/docker.sock:/var/run/docker.sock
      - portainer_data:/data
volumes:
  portainer_data:
networks:
  nginx_network:
    driver: bridge

View File

@@ -0,0 +1,89 @@
version: '3'
volumes:
  prometheus-data:
    driver: local
  prometheus-config:
    driver: local
  monitoring-grafana-data:
    driver: local
  loki_config:
    driver: local
  loki_data:
    driver: local
  promtail_config:
    driver: local
services:
  promtail:
    image: grafana/promtail:latest
    container_name: monitoring-promtail
    volumes:
      # Host and container logs promtail tails and ships to Loki.
      - /var/log:/var/log
      - /var/lib/docker/containers:/var/lib/docker/containers
      - promtail_config:/etc/promtail-config
    command: -config.file=/etc/promtail-config/promtail.yml
    restart: unless-stopped
  loki:
    image: grafana/loki:latest
    container_name: monitoring-loki
    ports:
      - "3100:3100"
    command: -config.file=/etc/loki/local-config.yaml
    volumes:
      - loki_config:/etc/loki
      - loki_data:/data/loki
    restart: unless-stopped
  node-exporter:
    image: prom/node-exporter
    container_name: monitoring-node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      # $$ escapes $ so Compose does not interpolate the regex anchor.
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9100:9100"
    restart: unless-stopped
  cadvisor:
    # NOTE(review): google/cadvisor is no longer maintained; the project publishes
    # images at gcr.io/cadvisor/cadvisor — confirm and migrate.
    image: google/cadvisor:latest
    container_name: monitoring-cadvisor
    ports:
      - "8099:8080"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    restart: unless-stopped
    devices:
      - /dev/kmsg
  prometheus:
    image: prom/prometheus:latest
    container_name: monitoring-prometheus
    ports:
      - "9090:9090"
    volumes:
      - prometheus-config:/etc/prometheus
      - prometheus-data:/prometheus
    restart: unless-stopped
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
  grafana:
    image: grafana/grafana-oss:latest
    container_name: monitoring-grafana
    ports:
      - "3090:3000"
    volumes:
      - monitoring-grafana-data:/var/lib/grafana
    restart: unless-stopped

View File

@@ -0,0 +1,29 @@
auth_enabled: false
server:
  http_listen_port: 3100
common:
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  # Single-node deployment: one copy of each chunk, in-memory ring.
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory
schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h
ruler:
  alertmanager_url: http://localhost:9093

View File

@@ -0,0 +1,27 @@
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  # external_labels:
  #   monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']

  # Host metrics from the node_exporter container (see docker-compose).
  - job_name: 'node_exporter'
    static_configs:
      - targets: ['monitoring-node-exporter:9100']

  # Per-container metrics from cAdvisor (see docker-compose).
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['monitoring-cadvisor:8080']

View File

@@ -0,0 +1,17 @@
# Promtail configuration: tail Docker json-file logs and push them to Loki.
server:
  # No HTTP/gRPC listener needed; Promtail only pushes.
  disable: true

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  - job_name: system
    pipeline_stages:
      # Parse the Docker json-file log format (timestamp, stream, log line).
      - docker: {}
    static_configs:
      # `targets` is required boilerplate for file-based static_configs;
      # only `labels` (with __path__) drives the actual file discovery.
      - targets:
          - localhost
        labels:
          job: docker
          __path__: /var/lib/docker/containers/*/*-json.log

View File

@@ -0,0 +1,75 @@
# Smart-home monitoring stack: InfluxDB (storage), Telegraf (collection),
# Grafana (dashboards). Kapacitor/Chronograf kept commented out for reference.
version: '3.7'

volumes:
  smarthome-monitoring-influxdb-data:
    driver: local
  smarthome-monitoring-influxdb-config:
    driver: local
  # smarthome-monitoring-chronograf-data:
  #   driver: local
  smarthome-monitoring-telegraf-data:
    driver: local
  smarthome-monitoring-grafana-data:
    driver: local

services:
  influxdb:
    image: influxdb:latest
    container_name: smarthome-monitoring-influxdb
    volumes:
      - smarthome-monitoring-influxdb-data:/var/lib/influxdb2
      - smarthome-monitoring-influxdb-config:/etc/influxdb2
    restart: unless-stopped
    environment:
      # Values quoted so they stay strings for the container, not YAML booleans.
      INFLUXDB_REPORTING_DISABLED: "true"
      INFLUXDB_ADMIN_ENABLED: "true"
      INFLUXDB_DB: "mydb"
    ports:
      - "8086:8086"
      - "8082:8082"
      - "8089:8089"

  # kapacitor:
  #   image: kapacitor:latest
  #   container_name: smarthome-monitoring-kapacitor
  #   environment:
  #     KAPACITOR_HOSTNAME: smarthome-monitoring-kapacitor
  #     KAPACITOR_INFLUXDB_0_URLS_0: http://smarthome-monitoring-influxdb:8086
  #   ports:
  #     - "9092:9092"

  # chronograf:
  #   image: chronograf:latest
  #   container_name: smarthome-monitoring-chronograf
  #   volumes:
  #     - smarthome-monitoring-chronograf-data:/var/lib/chronograf
  #   ports:
  #     - "8888:8888"
  #   environment:
  #     INFLUXDB_URL: http://smarthome-monitoring-influxdb:8086
  #     KAPACITOR_URL: http://smarthome-monitoring-kapacitor:9092
  #     REPORTING_DISABLED: "true"
  #   depends_on:
  #     - influxdb
  #     - kapacitor

  telegraf:
    image: telegraf:latest
    container_name: smarthome-monitoring-telegraf
    volumes:
      - smarthome-monitoring-telegraf-data:/etc/telegraf
      # Read-only Docker socket so Telegraf's docker input can collect stats.
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
    restart: unless-stopped
    environment:
      HOSTNAME: smarthome-monitoring-telegraf

  grafana:
    image: grafana/grafana-oss:latest
    container_name: smarthome-monitoring-grafana
    ports:
      - "3091:3000"
    volumes:
      - smarthome-monitoring-grafana-data:/var/lib/grafana
    restart: unless-stopped

File diff suppressed because it is too large Load Diff

162
tandoor-recipes/.env Normal file
View File

@@ -0,0 +1,162 @@
# only set this to true when testing/debugging
# when unset: 1 (true) - dont unset this, just for development
DEBUG=0
SQL_DEBUG=0
# HTTP port to bind to
# TANDOOR_PORT=8080
# hosts the application can run under e.g. recipes.mydomain.com,cooking.mydomain.com,...
ALLOWED_HOSTS=*
# random secret key, use for example `base64 /dev/urandom | head -c50` to generate one
# ---------------------------- REQUIRED -------------------------
# SECURITY NOTE(review): this value looks like a weak placeholder committed to
# version control — generate a real random key (`base64 /dev/urandom | head -c50`)
# and keep it out of git.
SECRET_KEY=ThisIsMySecretKeyHaHa
# ---------------------------------------------------------------
# your default timezone See https://timezonedb.com/time-zones for a list of timezones
TIMEZONE=Europe/Berlin
# add only a database password if you want to run with the default postgres, otherwise change settings accordingly
DB_ENGINE=django.db.backends.postgresql
# DB_OPTIONS= {} # e.g. {"sslmode":"require"} to enable ssl
POSTGRES_HOST=db_recipes
POSTGRES_PORT=5432
POSTGRES_USER=djangouser
# ---------------------------- REQUIRED -------------------------
# SECURITY NOTE(review): plaintext credential committed to version control —
# rotate it and source it from an untracked secrets file or secret store instead.
POSTGRES_PASSWORD=inginf95
# ---------------------------------------------------------------
POSTGRES_DB=djangodb
# database connection string, when used overrides other database settings.
# format might vary depending on backend
# DATABASE_URL = engine://username:password@host:port/dbname
# the default value for the user preference 'fractions' (enable/disable fraction support)
# default: disabled=0
FRACTION_PREF_DEFAULT=0
# the default value for the user preference 'comments' (enable/disable commenting system)
# default comments enabled=1
COMMENT_PREF_DEFAULT=1
# Users can set an amount of time after which the shopping list is refreshed when they are in viewing mode
# This is the minimum interval users can set. Setting this too low will allow users to refresh very frequently which
# might cause high load on the server. (Technically they can obviously refresh as often as they want with their own scripts)
SHOPPING_MIN_AUTOSYNC_INTERVAL=5
# Default for user setting sticky navbar
# STICKY_NAV_PREF_DEFAULT=1
# If base URL is something other than just / (you are serving a subfolder in your proxy for instance http://recipe_app/recipes/)
# Be sure to not have a trailing slash: e.g. '/recipes' instead of '/recipes/'
# SCRIPT_NAME=/recipes
# If staticfiles are stored at a different location uncomment and change accordingly, MUST END IN /
# this is not required if you are just using a subfolder
# This can either be a relative path from the applications base path or the url of an external host
# STATIC_URL=/static/
# If mediafiles are stored at a different location uncomment and change accordingly, MUST END IN /
# this is not required if you are just using a subfolder
# This can either be a relative path from the applications base path or the url of an external host
# MEDIA_URL=/media/
# Serve mediafiles directly using gunicorn. Basically everyone recommends not doing this. Please use any of the examples
# provided that include an additional nginx container to handle media file serving.
# If you know what you are doing turn this back on (1) to serve media files using djangos serve() method.
# when unset: 1 (true) - this is temporary until an appropriate amount of time has passed for everyone to migrate
GUNICORN_MEDIA=0
# S3 Media settings: store mediafiles in s3 or any compatible storage backend (e.g. minio)
# as long as S3_ACCESS_KEY is not set S3 features are disabled
# S3_ACCESS_KEY=
# S3_SECRET_ACCESS_KEY=
# S3_BUCKET_NAME=
# S3_REGION_NAME= # default none, set your region might be required
# S3_QUERYSTRING_AUTH=1 # default true, set to 0 to serve media from a public bucket without signed urls
# S3_QUERYSTRING_EXPIRE=3600 # number of seconds querystring are valid for
# S3_ENDPOINT_URL= # when using a custom endpoint like minio
# Email Settings, see https://docs.djangoproject.com/en/3.2/ref/settings/#email-host
# Required for email confirmation and password reset (automatically activates if host is set)
# EMAIL_HOST=
# EMAIL_PORT=
# EMAIL_HOST_USER=
# EMAIL_HOST_PASSWORD=
# EMAIL_USE_TLS=0
# EMAIL_USE_SSL=0
# email sender address (default 'webmaster@localhost')
# DEFAULT_FROM_EMAIL=
# prefix used for account related emails (default "[Tandoor Recipes] ")
# ACCOUNT_EMAIL_SUBJECT_PREFIX=
# allow authentication via reverse proxy (e.g. authelia), leave off if you dont know what you are doing
# see docs for more information https://vabene1111.github.io/recipes/features/authentication/
# when unset: 0 (false)
REVERSE_PROXY_AUTH=0
# Default settings for spaces, apply per space and can be changed in the admin view
# SPACE_DEFAULT_MAX_RECIPES=0 # 0=unlimited recipes
# SPACE_DEFAULT_MAX_USERS=0 # 0=unlimited users per space
# SPACE_DEFAULT_MAX_FILES=0 # Maximum file storage for space in MB. 0 for unlimited, -1 to disable file upload.
# SPACE_DEFAULT_ALLOW_SHARING=1 # Allow users to share recipes with public links
# allow people to create accounts on your application instance (without an invite link)
# when unset: 0 (false)
# ENABLE_SIGNUP=0
# If signup is enabled you might want to add a captcha to it to prevent spam
# HCAPTCHA_SITEKEY=
# HCAPTCHA_SECRET=
# if signup is enabled you might want to provide urls to data protection policies or terms and conditions
# TERMS_URL=
# PRIVACY_URL=
# IMPRINT_URL=
# enable serving of prometheus metrics under the /metrics path
# ATTENTION: view is not secured (as per the prometheus default way) so make sure to secure it
# through your web server (or leave it open if you don't care whether the stats are exposed)
# ENABLE_METRICS=0
# allows you to setup OAuth providers
# see docs for more information https://vabene1111.github.io/recipes/features/authentication/
# SOCIAL_PROVIDERS = allauth.socialaccount.providers.github, allauth.socialaccount.providers.nextcloud,
# Should a newly created user from a social provider get assigned to the default space and given permission by default ?
# ATTENTION: This feature might be deprecated in favor of a space join and public viewing system in the future
# default 0 (false), when 1 (true) users will be assigned space and group
# SOCIAL_DEFAULT_ACCESS = 1
# if SOCIAL_DEFAULT_ACCESS is used, which group should be added
# SOCIAL_DEFAULT_GROUP=guest
# Django session cookie settings. Can be changed to allow a single django application to authenticate several applications
# when running under the same database
# SESSION_COOKIE_DOMAIN=.example.com
# SESSION_COOKIE_NAME=sessionid # use this only to not interfere with non unified django applications under the same top level domain
# by default SORT_TREE_BY_NAME is disabled this will store all Keywords and Food in the order they are created
# enabling this setting makes saving new keywords and foods very slow, which doesn't matter in most usecases.
# however, when doing large imports of recipes that will create new objects, can increase total run time by 10-15x
# Keywords and Food can be manually sorted by name in Admin
# This value can also be temporarily changed in Admin, it will revert the next time the application is started
# This will be fixed/changed in the future by changing the implementation or finding a better workaround for sorting
# SORT_TREE_BY_NAME=0
# LDAP authentication
# default 0 (false), when 1 (true) list of allowed users will be fetched from LDAP server
#LDAP_AUTH=
#AUTH_LDAP_SERVER_URI=
#AUTH_LDAP_BIND_DN=
#AUTH_LDAP_BIND_PASSWORD=
#AUTH_LDAP_USER_SEARCH_BASE_DN=
#AUTH_LDAP_TLS_CACERTFILE=
# Enables exporting PDF (see export docs)
# Disabled by default, uncomment to enable
# ENABLE_PDF_EXPORT=1
# Recipe exports are cached for a certain time by default, adjust time if needed
# EXPORT_FILE_CACHE_DURATION=600

View File

@@ -0,0 +1,43 @@
# Tandoor Recipes stack: Postgres + Django app + nginx serving static/media.
version: "3"

services:
  db_recipes:
    restart: always
    # NOTE(review): Postgres 11 is end-of-life — plan an upgrade (requires a
    # data migration, not just an image bump).
    image: postgres:11-alpine
    volumes:
      - tandoor_postgresql:/var/lib/postgresql/data
    env_file:
      - ./.env

  web_recipes:
    restart: always
    image: vabene1111/recipes
    env_file:
      - ./.env
    volumes:
      - tandoor_staticfiles:/opt/recipes/staticfiles
      # The app writes its nginx config here; the nginx container mounts it read-only.
      - tandoor_nginx_config:/opt/recipes/nginx/conf.d
      - tandoor_mediafiles:/opt/recipes/mediafiles
    depends_on:
      - db_recipes

  nginx_recipes:
    image: nginx:mainline-alpine
    container_name: tandoor-recipes
    restart: always
    ports:
      # Quoted to avoid YAML numeric parsing of bare port mappings.
      - "8095:80"
    env_file:
      - ./.env
    depends_on:
      - web_recipes
    volumes:
      - tandoor_nginx_config:/etc/nginx/conf.d:ro
      - tandoor_staticfiles:/static:ro
      - tandoor_mediafiles:/media:ro

volumes:
  tandoor_nginx_config:
  tandoor_staticfiles:
  tandoor_mediafiles:
  tandoor_postgresql:

View File

@@ -0,0 +1,19 @@
# ThingsBoard IoT platform with embedded Postgres, in-memory queue.
version: '3.0'

services:
  mytb:
    restart: unless-stopped
    image: "thingsboard/tb-postgres:latest"
    ports:
      - "9090:9090"                # web UI / REST API
      - "1883:1883"                # MQTT
      - "7070:7070"                # edge RPC
      - "5683-5688:5683-5688/udp"  # CoAP/LwM2M
    environment:
      TB_QUEUE_TYPE: in-memory
    volumes:
      - things_board_data:/data
      - things_board_log:/var/log/thingsboard

volumes:
  things_board_data:
  things_board_log:

View File

@@ -0,0 +1,18 @@
# Traccar GPS tracking server; the 5000-5150 range serves device protocols.
version: '3'

services:
  traccar:
    image: traccar/traccar:latest
    container_name: traccar
    restart: always
    ports:
      - "8096:8082"
      - "5000-5150:5000-5150"
      - "5000-5150:5000-5150/udp"
    volumes:
      - traccar_logs:/opt/traccar/logs:rw
      - traccar_data:/opt/traccar/data:rw

volumes:
  traccar_logs:
  traccar_data:

View File

@@ -0,0 +1,36 @@
# Traefik reverse proxy with Let's Encrypt (TLS-ALPN challenge) on the
# external traefik_web network.
version: "3.3"

services:
  traefik:
    image: "traefik:v3.2"
    container_name: "traefik"
    command:
      #- "--log.level=DEBUG"
      # NOTE(review): insecure API exposes the dashboard unauthenticated on
      # :8080 (published as 9080) — make sure it is firewalled.
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--providers.docker.network=traefik_web"
      - "--entrypoints.https.address=:443"
      - "--entrypoints.https.http.tls.certResolver=le"
      - "--certificatesresolvers.le.acme.tlschallenge=true"
      - "--certificatesresolvers.le.acme.email=nils.grunwald@msn.com"
      - "--certificatesresolvers.le.acme.storage=/letsencrypt/acme.json"
    ports:
      - "444:443"
      - "9080:8080"
    volumes:
      # Persist ACME certificates across restarts.
      - letsencrypt:/letsencrypt
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
    networks:
      - web
    extra_hosts:
      - host.docker.internal:172.17.0.1
    restart: always

volumes:
  letsencrypt:

networks:
  web:
    name: traefik_web

View File

@@ -0,0 +1,18 @@
# Simple docker-compose.yml
# You can change your port or volume location
version: '3.3'

services:
  uptime-kuma:
    image: louislam/uptime-kuma:1
    container_name: uptime-kuma
    volumes:
      - uptime-kuma-data:/app/data
    ports:
      # Quoted to avoid YAML numeric parsing of bare port mappings.
      - "3001:3001" # <Host Port>:<Container Port>
    restart: always

volumes:
  uptime-kuma-data: