Compare commits

..

77 Commits

| SHA1 | Message | deploy host / Deploy (push) | Date |
| --- | --- | --- | --- |
| fae1e776c9 | . | Failing after 6s | 2024-09-05 23:11:39 +01:00 |
| 24a0a03a57 | . | no checks | 2024-09-05 23:10:00 +01:00 |
| 41c33b23af | . | Successful in 6s | 2024-09-05 23:01:59 +01:00 |
| e20fd0b80f | . | Successful in 8s | 2024-09-05 23:01:03 +01:00 |
| bb8e4e59ae | . | Successful in 6s | 2024-09-05 22:59:30 +01:00 |
| a5136e07a2 | . | Failing after 4s | 2024-09-05 22:58:53 +01:00 |
| 1910b1da3a | . | Successful in 7s | 2024-09-05 22:58:42 +01:00 |
| d1093834fb | . | Successful in 7s | 2024-09-05 22:57:18 +01:00 |
| 07e34529a5 | . | no checks | 2024-09-05 22:55:16 +01:00 |
| fa9a8e67e9 | . | Failing after 6s | 2024-09-05 22:54:17 +01:00 |
| e5aa21e3d5 | . | Failing after 6s | 2024-09-05 22:53:15 +01:00 |
| 2ee7121c08 | . | Failing after 5s | 2024-09-05 22:52:52 +01:00 |
| 042fc295de | . | Failing after 5s | 2024-09-05 22:52:26 +01:00 |
| 08f6d60046 | . | Failing after 6s | 2024-09-05 22:52:01 +01:00 |
| a70a5cf420 | . | Successful in 6s | 2024-09-05 22:51:33 +01:00 |
| 1983102d01 | . | Failing after 6s | 2024-09-05 22:50:01 +01:00 |
| 56bd7bca4a | . | Failing after 6s | 2024-09-05 22:48:06 +01:00 |
| c3a3db9a5b | . | Failing after 6s | 2024-09-05 22:47:01 +01:00 |
| 0b3c8564db | . | Successful in 8s | 2024-09-05 22:45:24 +01:00 |
| 66e93a72d0 | . | Failing after 5s | 2024-09-05 22:43:02 +01:00 |
| 620106ef60 | . | Successful in 6s | 2024-09-05 22:42:32 +01:00 |
| 88f9289665 | . | Successful in 5s | 2024-09-05 22:41:39 +01:00 |
| a426a35f86 | . | Successful in 6s | 2024-09-05 22:40:56 +01:00 |
| 8325fff602 | . | Successful in 7s | 2024-09-05 22:38:05 +01:00 |
| 94ea5525f5 | . | Successful in 8s | 2024-09-05 22:37:07 +01:00 |
| 6ac89218fd | . | Successful in 9s | 2024-09-05 22:35:53 +01:00 |
| 79cb5c6fd5 | modified: .gitea/workflows/host.yml | Failing after 6s | 2024-09-05 22:35:07 +01:00 |
| 2be358ff98 | . | Successful in 8s | 2024-09-05 22:32:25 +01:00 |
| 34a918ec8c | . | Successful in 6s | 2024-09-05 22:27:35 +01:00 |
| d7b2e561cc | . | Successful in 6s | 2024-09-05 22:26:27 +01:00 |
| c27fa22928 | . | Failing after 6s | 2024-09-05 22:25:49 +01:00 |
| 4fd23a5d8c | . | Failing after 6s | 2024-09-05 22:25:11 +01:00 |
| 36b82c9038 | . | Failing after 6s | 2024-09-05 22:24:08 +01:00 |
| 3f5d52a5fa | , | Successful in 14s | 2024-09-05 22:23:16 +01:00 |
| 933e774c7e | . | Successful in 7s | 2024-09-05 22:20:38 +01:00 |
| 11cea9b6fc | modified: .gitea/workflows/host.yml | Successful in 7s | 2024-09-05 22:18:22 +01:00 |
| 9f9a390765 | . | Successful in 7s | 2024-09-05 22:16:38 +01:00 |
| 7eb254da2f | . | Successful in 7s | 2024-09-05 22:10:42 +01:00 |
| e7b1181dab | . | Successful in 7s | 2024-09-05 22:09:52 +01:00 |
| 129e1836d4 | . | Successful in 7s | 2024-09-05 22:08:46 +01:00 |
| 4ea6e35522 | . | Successful in 7s | 2024-09-05 22:07:30 +01:00 |
| 6d9a1fd947 | . | Failing after 5s | 2024-09-05 22:06:35 +01:00 |
| 8d0f2c181a | . | Failing after 5s | 2024-09-05 22:06:07 +01:00 |
| d7c41dd3cb | . | Successful in 7s | 2024-09-05 22:03:28 +01:00 |
| 260ae62e59 | . | Successful in 6s | 2024-09-05 22:01:48 +01:00 |
| bccf153db0 | . | Successful in 7s | 2024-09-05 21:59:55 +01:00 |
| 66e09df419 | modified: .gitea/workflows/host.yml | Successful in 7s | 2024-09-05 21:57:39 +01:00 |
| edde01efb5 | . | Successful in 6s | 2024-09-05 21:55:28 +01:00 |
| ab7f0b2e97 | . | Successful in 7s | 2024-09-05 21:54:40 +01:00 |
| fba35ce3bc | . | no checks | 2024-09-05 21:51:39 +01:00 |
| 631ff376e5 | . | Successful in 7s | 2024-09-05 21:39:52 +01:00 |
| ca14c75897 | modified: .gitea/workflows/host.yml | Failing after 5s | 2024-09-05 21:38:50 +01:00 |
| 86ba316bdc | . | Failing after 5s | 2024-09-05 21:37:44 +01:00 |
| a739a72f4a | . | Successful in 6s | 2024-09-05 21:36:16 +01:00 |
| 5dda1e9135 | . | no checks | 2024-09-05 21:36:00 +01:00 |
| 9455ef9d80 | . | Successful in 6s | 2024-09-05 21:22:54 +01:00 |
| 1193a81fb0 | . | Successful in 6s | 2024-09-05 21:21:46 +01:00 |
| f5a368e3f8 | . | Failing after 5s | 2024-09-05 21:21:12 +01:00 |
| b2cae5fb94 | . | Successful in 6s | 2024-09-05 21:18:04 +01:00 |
| 2a5f3c14ee | . | Successful in 9s | 2024-09-05 21:17:09 +01:00 |
| d793e8781c | . | Successful in 1m42s | 2024-09-05 21:00:15 +01:00 |
| 7a2ac916c9 | .. | Successful in 6s | 2024-09-05 20:58:20 +01:00 |
| 56cd2c69af | . | Successful in 7s | 2024-09-05 20:54:00 +01:00 |
| a7d600c84d | . | Successful in 7s | 2024-09-05 20:41:32 +01:00 |
| f9bc6ae967 | . | Failing after 6s | 2024-09-05 20:38:54 +01:00 |
| b4768c784f | . | Failing after 2m34s | 2024-09-05 20:22:58 +01:00 |
| 79e4d6cd47 | . | Failing after 26s | 2024-09-05 20:21:36 +01:00 |
| 7df12159ea | . | Failing after 40s | 2024-09-05 20:20:23 +01:00 |
| dcff1e3c7f | . | Failing after 2m3s | 2024-09-05 20:14:03 +01:00 |
| baff515fd5 | . | Successful in 16s | 2024-09-05 20:12:14 +01:00 |
| 54d5fc1c92 | modified: .gitea/workflows/host.yml | Failing after 2s | 2024-09-05 20:10:58 +01:00 |
| 24edd39cf3 | modified: .gitea/workflows/host.yml | Failing after 15s | 2024-09-05 19:49:53 +01:00 |
| 14b46c5ced | . | Failing after 2s | 2024-09-05 19:38:57 +01:00 |
| 776c2b3706 | . | Failing after 9s | 2024-09-05 08:57:19 +01:00 |
| abbad37871 | . | Failing after 15s | 2024-09-05 08:53:14 +01:00 |
| 25802094a5 | some tests | no checks | 2024-09-04 00:55:09 +01:00 |
| 39e5d5a855 | . | Successful in 6s | 2024-09-03 22:15:37 +01:00 |

62 changed files with 952 additions and 6905 deletions


@@ -1,37 +0,0 @@
{
"name": "casa-dev",
"image": "git.limbosolutions.com/mylimbo/devcontainers/devops:latest",
"remoteUser": "vscode",
"runArgs": [
"--hostname=casa-dev"
],
"mounts": [
"source=${localWorkspaceFolder}/../lms,target=/workspaces/lms,type=bind",
"source=${localWorkspaceFolder}/../homeAssistant,target=/workspaces/homeAssistant,type=bind",
"source=${localWorkspaceFolder}/../mosquitto,target=/workspaces/mosquitto,type=bind",
"source=${localWorkspaceFolder}/../kb,target=/workspaces/kb,type=bind",
"source=${localWorkspaceFolder}/../pi.bluetooth.speaker,target=/workspaces/pi.bluetooth.speaker,type=bind",
"source=${localWorkspaceFolder}/.env.d/kube,target=/home/vscode/.kube,type=bind",
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,consistency=cached",
"source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached"
],
"customizations": {
"vscode": {
"extensions": [
"ms-kubernetes-tools.vscode-kubernetes-tools",
"redhat.ansible",
"mtxr.sqltools-driver-mysql",
"stateful.runme",
"yzhang.markdown-all-in-one",
"davidanson.vscode-markdownlint",
"eamodio.gitlens",
"m4ns0ur.base64",
"rogalmic.bash-debug",
"streetsidesoftware.code-spell-checker",
"ms-azuretools.vscode-containers",
"sanjulaganepola.github-local-actions",
"eamodio.gitlens"
]
}
}
}


@@ -1,52 +0,0 @@
name: Deploy Casa services CI/CD Pipeline
on:
push:
branches:
- fix/*
- main
- master
paths:
- 'services/**'
- '.gitea/workflows/casa-services**'
pull_request:
paths:
- 'monitoring/**'
- '.gitea/workflows/monitoring**'
schedule:
- cron: '0 15 * * 0' # every sunday 3 pm
jobs:
deploy:
runs-on: casa-vlan-cicd
env:
GITHUB_TEMP: ${{ runner.temp }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Fetch limbo public actions
env:
RUNNER_TEMP: "${{ runner.temp }}"
WORKSPACE: "${{ gitea.workspace }}"
run: |
curl -fsSL https://git.limbosolutions.com/kb/gitea/raw/branch/main/cloud-scripts/setup-limbo-actions.sh | bash 2>&1
- name: Setup kubectl
uses: ./.gitea/limbo_actions/kubectl-setup
with:
kube_server: ${{ secrets.CASA_VLAN_KUBE_SERVER }}
kube_ca_base64: ${{ secrets.CASA_VLAN_KUBE_CA_BASE64 }}
kube_token: ${{ secrets.CASA_VLAN_KUBE_TOKEN }}
- name: Deploy zigbee2mqtt
shell: bash
run: |
kubectl apply -f ./services/zigbee2mqtt/deploy/zigbee2mqtt-deploy.yaml
- name: Deploy wyoming
shell: bash
run: |
kubectl apply -f ./services/wyoming/deploy/wyoming-deploy.yaml

.gitea/workflows/host.yml (new file, +37)

@@ -0,0 +1,37 @@
name: deploy host
on:
push:
# paths:
# - "ansible/**"
# - ".gitea/workflows/**host**.yml"
jobs:
Deploy:
runs-on: "homesrv-deploy"
# - data
# - /src/*.json
steps:
- name: Checkout code
uses: actions/checkout@v2
# - name: ls
# run: ls -lah ${GITHUB_WORKSPACE}
# - name: Run Ansible Playbook
# run: |
# docker run --rm --volumes-from ${{ env.JOB_CONTAINER_NAME }} ubuntu:latest ls -lah ${GITHUB_WORKSPACE}; code-server
- name: Run Ansible Playbook
run: |
docker run --rm \
-e ANSIBLE_PLAYBOOK_PRIVATE_KEY="${{ secrets.HOST_ANSIBLE_PRIVATE_KEY }}" \
-e ANSIBLE_PLAYBOOK_REMOTE_USER="${{ secrets.HOST_ANSIBLE_REMOTE_USER }}" \
-e ANSIBLE_PLAYBOOK_INVENTORY="${{ secrets.HOST_ANSIBLE_REMOTE_ADDRESS }}" \
-e ANSIBLE_PLAYBOOK_WORKSPACE_PATH=${GITHUB_WORKSPACE}/ansible \
--volumes-from ${{ env.JOB_CONTAINER_NAME }} \
--entrypoint "/bin/bash" \
git.limbosolutions.com/kb/ansible-playbook:dev -c "ls -lah ${GITHUB_WORKSPACE}/ansible; chmod -R a+rwx ${GITHUB_WORKSPACE}/ansible; python3 ${GITHUB_WORKSPACE}/ansible"
# --entrypoint "/bin/bash" \
#git.limbosolutions.com/kb/ansible-playbook:dev -c "ls -lah ${GITHUB_WORKSPACE}"
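
To debug this step without pushing another commit, one rough local equivalent is the sketch below (all values are hypothetical: the key path, user, and inventory stand in for the HOST_ANSIBLE_* secrets, and a bind mount replaces the runner's job-container volumes):

```bash
# Approximate the workflow's docker run locally (placeholder values throughout).
docker run --rm \
  -e ANSIBLE_PLAYBOOK_PRIVATE_KEY="$(cat ~/.ssh/id_ed25519)" \
  -e ANSIBLE_PLAYBOOK_REMOTE_USER="admin" \
  -e ANSIBLE_PLAYBOOK_INVENTORY="homesrv01.dev.lan," \
  -e ANSIBLE_PLAYBOOK_WORKSPACE_PATH="$PWD/ansible" \
  -v "$PWD/ansible:/workspace/ansible:ro" \
  --entrypoint /bin/bash \
  git.limbosolutions.com/kb/ansible-playbook:dev -c "ls -lah /workspace/ansible"
```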


@@ -1,69 +0,0 @@
name: Monitoring services CI/CD Pipeline
on:
push:
branches:
- fix/*
- main
- master
paths:
- 'monitoring/**'
- '.gitea/workflows/monitoring**'
pull_request:
paths:
- 'monitoring/**'
- '.gitea/workflows/monitoring**'
schedule:
- cron: '0 12 * * 0' # every sunday at noon
jobs:
deploy:
runs-on: casa-vlan-cicd
env:
GITHUB_TEMP: ${{ runner.temp }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Fetch limbo public actions
env:
RUNNER_TEMP: "${{ runner.temp }}"
WORKSPACE: "${{ gitea.workspace }}"
run: |
curl -fsSL https://git.limbosolutions.com/kb/gitea/raw/branch/main/cloud-scripts/setup-limbo-actions.sh | bash 2>&1
- name: Setup kubectl
uses: ./.gitea/limbo_actions/kubectl-setup
with:
kube_server: ${{ secrets.CASA_VLAN_KUBE_SERVER }}
kube_ca_base64: ${{ secrets.CASA_VLAN_KUBE_CA_BASE64 }}
kube_token: ${{ secrets.CASA_VLAN_KUBE_TOKEN }}
# secrets.LOKI_URL = https://<LOKISERVER>/loki/api/v1/push
- name: Deploy promtail
shell: bash
env:
LOKI_URL: "${{ secrets.LOKI_URL }}"
run: |
# add repo
helm repo add grafana https://grafana.github.io/helm-charts
# Install & Upgrade
helm upgrade --install promtail grafana/promtail --namespace monitoring \
--values=./monitoring/promtail/values.yaml --set config.clients[0].url=${LOKI_URL}
# - name: Deploy Telegraf
# shell: bash
# run: |
# # add repo
# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# # Install & Upgrade
# helm upgrade --install prometheus-stack prometheus-community/kube-prometheus-stack \
# --namespace monitoring \
# --values=./monitoring/prometheus/deploy/helm/01-only-crd-and-operator.yaml \
# --values=./monitoring/prometheus/deploy//helm/02-kube-metrics.yaml \
# --values=./monitoring/prometheus/deploy/helm/03-node-exporter.yaml \
# --values=./monitoring/prometheus/deploy/helm/04-kubelet.yaml
# kubectl apply -f ./monitoring/prometheus/deploy/prometheus-agent.yaml


@@ -0,0 +1,13 @@
name: Portainer stack nginx
on:
push:
paths:
- "services/nginx/**"
- ".gitea/workflows/**nginx**.yml"
jobs:
deploy:
runs-on: "homesrv-deploy"
steps:
- name: Portainer stack nginx webhook
run: |
curl -X POST "${{secrets.PORTAINER_NGINX_WEBHOOK_URL}}"
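
A Portainer stack webhook is a plain unauthenticated POST endpoint, so it can also be triggered by hand; the URL below is a hypothetical placeholder for the value stored in the PORTAINER_NGINX_WEBHOOK_URL secret:

```bash
# Manually trigger the stack redeploy (placeholder host and webhook id).
curl -X POST "https://portainer.dev.lan/api/stacks/webhooks/00000000-0000-0000-0000-000000000000"
```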


@@ -0,0 +1,13 @@
name: Portainer stack wyoming
on:
push:
paths:
- "services/wyoming/**"
- ".gitea/workflows/**wyoming**.yml"
jobs:
deploy:
runs-on: "homesrv-deploy"
steps:
- name: Portainer stack wyoming webhook
run: |
curl -X POST "${{secrets.PORTAINER_WYOMING_WEBHOOK_URL}}"


@@ -0,0 +1,13 @@
name: Portainer stack zigbee2mqtt
on:
push:
paths:
- "services/zigbee2mqtt/**"
- ".gitea/workflows/**zigbee2mqtt**.yml"
jobs:
deploy:
runs-on: "homesrv-deploy"
steps:
- name: Portainer stack zigbee2mqtt webhook
run: |
curl -X POST "${{secrets.PORTAINER_ZIGBEE2MQTT_WEBHOOK_URL}}"

.gitignore (vendored)

@@ -8,7 +8,3 @@ services/zigbee2mqtt/volumes/*
.vscode/settings.json
gitignore/*
**.local
.secrets
ansible/inventory.yml
.env.d/*
.tmp/**

README.md

@@ -1,36 +1,132 @@
# casa
# homesrv01.dev.lan
Welcome to my casa repository, where Home Assistant and other services are hosted.
This repository is dedicated to documenting and maintaining the server configuration and workflows.
Welcome to homesrv01.dev.lan git page.
The primary goal is to establish a k3s cluster capable of isolating services and communications related to home automation at the network level.
The server operates within its own VLAN (`casa`) but requires controlled communication with other VLANs, such as the `IOT` VLAN.
This repository serves to document and maintain the server where the home assistant runs.
For more information about k3s cluster/nodes setup check [readme](./docs/k3s-cluster.md).
The idea was to create a server with docker where it would be possible to isolate all servers, services and communications related to home automation at the network level.
**Table of Contents:**
The server itself is on its own vlan (Vlan: homesrv) but requires communication with the Vlans:
- IOT
Using [code-server docker container](#code-server) for Development / Maintenance.
All host configuration is applied using [ansible](#ansible-roles).
<!-- omit in toc -->
## Table of Contents
- [Services](#services)
- [myInfra stack](#myinfra-stack)
- [SSH](#ssh)
- [nginx](#nginx)
- [code-server](#code-server)
- [Home Assistant](#home-assistant)
- [Lyrion Music Server (LMS)](#lyrion-music-server-lms)
- [Mosquitto](#mosquitto)
- [Wyoming](#wyoming)
- [Zigbee2mqtt](#zigbee2mqtt)
- [Docker devices](#docker-devices)
- [Proxmox - lxc container](#proxmox---lxc-container)
- [Operating System](#operating-system)
- [Packages and Requirements](#packages-and-requirements)
- [Ansible roles](#ansible-roles)
- [myInfra.dev.homesrv01.core](#myinfradevhomesrv01core)
- [myInfra.dev.homesrv01](#myinfradevhomesrv01)
- [fstab](#fstab)
## Home Assistant
## Services
[Git Repo](/:root/marcio.fernandes/homeAssistant)
### myInfra stack
## Lyrion Music Server (LMS)
Docker, Portainer, promtail and telegraf are [maintained in the myInfra repo](/marcio.fernandes&myInfra).
For instructions on setting up the Lyrion Music Server on kubernetes, refer to the [LMS Git Repository](/:root/marcio.fernandes/lms).
### SSH
For information on integrating Lyrion Music Server with Home Assistant, visit the [Home Assistant Git Repository](/:root/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server).
Deployed and maintained by the ansible role [myInfra.dev.homesrv1](#ansible-roles).
## Mosquitto
### nginx
[Git Repo](/:root/marcio.fernandes/mosquitto)
Using portainer stack (stack name: nginx) connected to this repo. [Docker Compose](./services/nginx/docker-compose.yaml)
## Wyoming
All configuration is set during docker build.
### code-server
Using [custom code-server docker image](/kb/code-server/), includes:
- docker-cli
- ansible
- ansible-lint
For more flexibility with bind mounts, the stack is maintained directly in portainer (stack name: code-server).
Example docker-compose.yml:
```yaml
version: '3'
services:
code-server:
container_name: code
image: git.limbosolutions.com/kb/code-server:latest
environment:
- PUID=0
- PGID=0
- TZ=Europe/London
- PASSWORD=${CODESERVER_PASSWORD}
- DEFAULT_WORKSPACE=/config/workspace
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- code-server_config:/config
- code-server_root:/root
- has_config:/config/workspace/has/volumes/config
- /:/mnt/hostfs
#- ./../nginx/volumes/nginx_conf:/config/workspace/host/repos/homesrv01.dev.lan/services/nginx/volumes/nginx_conf
#- mosquitto_config:/config/workspace/host/repos/homesrv01.dev.lan/services/mosquitto/volumes/config
#- mosquitto_data:/config/workspace/host/repos/homesrv01.dev.lan/services/mosquitto/volumes/data
#- zigbee2mqtt_data:/config/workspace/host/repos/homesrv01.dev.lan/services/zigbee2mqtt/volumes/data
- hostfs:/mnt/hostfs
networks:
- code
- reverseproxy_public
ports:
- 8444:8443
restart: unless-stopped
volumes:
# zigbee2mqtt_data:
# name: zigbee2mqtt_data
# external: true
code-server_config:
code-server_root:
has_config:
name: has_app_config
external: true
#mosquitto_config:
# external: true
#mosquitto_data:
# external: true
networks:
code:
reverseproxy_public:
external: true
```
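
Since the stack is maintained in portainer, the compose file above is only a reference; a minimal sketch for trying it outside portainer (assuming it is saved as docker-compose.yml and a throwaway password) would be:

```bash
# Bring the stack up locally for a quick test (password is a placeholder).
CODESERVER_PASSWORD=changeme docker compose up -d
docker logs -f code
```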
### Home Assistant
[Git Repo](https://git.limbosolutions.com/marcio.fernandes/homeAssistant)
### Lyrion Music Server (LMS)
[Check the git repo](/marcio.fernandes/lms) for more information on how to set up the Lyrion Music Server docker container, or the [home assistant repo](/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server) for more information about the home assistant integration.
Requires [music docker volume](#myinfradevhomesrv01).
### Mosquitto
[Git Repo](https://git.limbosolutions.com/marcio.fernandes/mosquitto)
### Wyoming
A peer-to-peer protocol for voice assistants (basically JSONL + PCM audio)
@@ -44,11 +140,11 @@ Used in Rhasspy and Home Assistant for communication with voice services.
This is an open standard of the Open Home Foundation.
For more information about home assistant integration [check home assistant repo](/:root/marcio.fernandes/homeassistant#wyoming).
For more information about home assistant integration [check home assistant repo](/marcio.fernandes/homeassistant#wyoming).
Deployment files and scripts live in the services/wyoming folder of this repo.
Currently using portainer stack (name: wyoming) with git reference to this repo. [docker compose file](./services/wyoming/docker-compose.yaml).
[Continuous deploy](./.gitea/workflows/casa-services-deploy.yaml).
Gitea [Continuous deploy action](./.gitea/workflows/services.wyoming.yml)
Links:
@@ -57,12 +153,154 @@ Links:
- [https://exitcode0.net/posts/wyoming-whisper-docker-compose/](https://exitcode0.net/posts/wyoming-whisper-docker-compose/)
- [https://exitcode0.net/posts/wyoming-piper-docker-compose/](https://exitcode0.net/posts/wyoming-piper-docker-compose/)
## Zigbee2mqtt
### Zigbee2mqtt
Zigbee to MQTT bridge, get rid of your proprietary Zigbee bridges
Attached the SONOFF Universal Zigbee 3.0 USB Dongle Plus to the Proxmox node and configured USB passthrough so the container can use it.
Currently using a portainer stack (name: zigbee2mqtt) with a git reference to this repo. [Docker compose](./services/zigbee2mqtt/docker-compose.yaml).
Deployment files and scripts live in the services/zigbee2mqtt folder of this repo.
SONOFF Universal Zigbee 3.0 USB Dongle Plus attached on [proxmox host](#proxmox---lxc-container).
[Continuous deploy](./.gitea/workflows/casa-services-deploy.yaml).
Device permissions are patched on the [proxmox host](#proxmox---lxc-container) (USB passthrough to the [lxc container](#proxmox---lxc-container)):
```bash
# on the proxmox host
chown 100000:100020 /dev/ttyUSB0
chown 100000:100020 /dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0
```
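
The chown above does not survive a reboot or a re-plug of the dongle. A hedged way to make it persistent (a sketch, assuming the Silicon Labs 10c4:ea60 IDs shown by lsusb further down) is a udev rule on the proxmox host:

```bash
# Hypothetical udev rule; adjust the USB IDs and owner/group to your setup.
cat <<'EOF' > /etc/udev/rules.d/99-zigbee-dongle.rules
SUBSYSTEM=="tty", ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", OWNER="100000", GROUP="100020"
EOF
# reload the rules without rebooting
udevadm control --reload-rules && udevadm trigger
```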
#### Docker devices
```yaml
....
devices:
# Make sure this matches your adapter location
- /dev/ttyUSB0:/dev/ttyUSB0
....
```
Links:
- [https://www.zigbee2mqtt.io/](https://www.zigbee2mqtt.io/)
- [Home assistant integration](/marcio.fernandes/homeassistant#Zigbee2mqtt)
- [Continuous Deploy - git action](./.gitea/workflows/services.zigbee2mqtt.yml)
## Proxmox - lxc container
Currently hosted on a proxmox ubuntu container.
```bash
# cat /etc/pve/lxc/105.conf
arch: amd64
cmode: shell
cores: 2
features: fuse=1,keyctl=1,nesting=1
hostname: homesrv01
memory: 1500
net0: name=eth0,bridge=vmbr0,firewall=1,ip6=dhcp,...,type=veth
onboot: 1
ostype: ubuntu
protection: 1
rootfs: local-lvm:vm-105-disk-0,size=32G
swap: 1500
unprivileged: 1
lxc.cgroup2.devices.allow: c 189:* rwm
lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
lxc.cgroup2.devices.allow: c 188:* rwm
lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
```
How to identify the values for `lxc.cgroup2.devices.allow` and `lxc.mount.entry`:
```bash
# identify the usb dongle
lsusb
# example
# Bus 001 Device 008: ID 10c4:ea60 Silicon Labs CP210x UART Bridge
#replace with bus and device id
ls -l /dev/bus/usb/001/008
#example result
# crw-rw-r-- 1 root root 189, 7 May 17 15:56 /dev/bus/usb/001/008
# so
#lxc.cgroup2.devices.allow: c 189:* rwm
#lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
ls -l /dev/serial/by-id/
# example result
#lrwxrwxrwx 1 root root 13 May 17 15:56 usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 -> ../../ttyUSB0
ls -l /dev/ttyUSB0
#example result
#crw-rw---- 1 root dialout 188, 0 May 17 15:56 /dev/ttyUSB0
#so
#lxc.cgroup2.devices.allow: c 188:* rwm
#lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
```
## Operating System
```bash
# lsb_release -a
Distributor ID: Ubuntu
Description: Ubuntu 24.04 LTS
Release: 24.04
Codename: noble
# uname -r
6.8.4-3-pve
```
## Packages and Requirements
```bash
apt install ansible
```
## Ansible roles
### myInfra.dev.homesrv01.core
- ssh server
- admin user
- git package
- curl package
```bash
# execute locally
sudo ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 ansible/core.yml
```
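
A dry run first is a reasonable habit; ansible's check mode works here as long as the roles tolerate it (an assumption):

```bash
# dry-run the core playbook before applying it
sudo ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 --check ansible/core.yml
```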
### myInfra.dev.homesrv01
[Requires myInfra stack](#myinfra-stack).
- daily auto update script
- music docker volume. [requires sshfs mount to media on nas](#fstab)
```bash
# execute locally
sudo ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 ansible/site.yml
```
``` bash
# check auto update scripts logs
journalctl -r -t auto-update
```
## fstab
```bash
# /etc/fstab
# used by docker volume music
sshfs#media@nas.lan:/home/media /mnt/media@sshfs:nas.lan fuse defaults,_netdev,allow_other,follow_symlinks 0 0
```
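
To confirm the entry mounts cleanly without a reboot (assuming sshfs and fuse are installed):

```bash
# apply all fstab entries and check that the sshfs mount appeared
sudo mount -a
mount | grep sshfs
```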

ansible/core.yml (new file, +6)

@@ -0,0 +1,6 @@
- name: homesrv01 core playbook
hosts: all
roles:
- myInfra.dev.homesrv01.core
- myInfra.ssh


@@ -0,0 +1,27 @@
# Ansible Tower ignore list
# Ansible runtime and backups
*.original
*.tmp
*.bkp
*.retry
*.*~
# Tower runtime roles
roles/**
!roles/myInfra.dev.homesrv01
!roles/requirements.yml
# Try to avoid any plain-text passwords
*pwd*
*pass*
*password*
*.txt
# Exclude all binaries
*.bin
*.jar
*.tar
*.zip
*.gzip
*.tgz


@@ -0,0 +1,60 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
# dependencies:
# - role: myInfra.ssh
# - role: myInfra.docker.portainer
# - role: myInfra.docker.promtail
# - role: myInfra.docker.telegraf
# - role: myInfra.dev.homesrv01.core
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.


@@ -0,0 +1,20 @@
---
- name: admin user - setup
become: true
ansible.builtin.user:
name: admin
shell: /bin/bash
groups: sudo,docker
append: yes
- name: Install/Update Packages
become: true
ansible.builtin.package:
name:
- git
- curl
state: latest


@@ -0,0 +1,27 @@
# Ansible Tower ignore list
# Ansible runtime and backups
*.original
*.tmp
*.bkp
*.retry
*.*~
# Tower runtime roles
roles/**
!roles/myInfra.dev.homesrv01
!roles/requirements.yml
# Try to avoid any plain-text passwords
*pwd*
*pass*
*password*
*.txt
# Exclude all binaries
*.bin
*.jar
*.tar
*.zip
*.gzip
*.tgz


@@ -0,0 +1,60 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
# dependencies:
# - role: myInfra.dev.homesrv01.core
# - role: myInfra.docker.portainer
# - role: myInfra.docker.promtail
# - role: myInfra.docker.telegraf
# - role: myInfra.dev.homesrv01.core
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.


@@ -0,0 +1,12 @@
---
- name: docker - music volume
community.docker.docker_volume:
name: music
driver: local
driver_options:
type: none
device: /mnt/media@sshfs:nas.lan/music
o: bind
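
A quick hedged check that the task produced the intended bind-backed volume:

```bash
# inspect the volume created by the role; Options should show the bind device
docker volume inspect music --format '{{ json .Options }}'
```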

ansible/roles/myInfra.ssh/.gitignore (vendored, new file, +27)

@@ -0,0 +1,27 @@
# Ansible Tower ignore list
# Ansible runtime and backups
*.original
*.tmp
*.bkp
*.retry
*.*~
# Tower runtime roles
roles/**
!roles/myInfra.dev.homesrv01
!roles/requirements.yml
# Try to avoid any plain-text passwords
*pwd*
*pass*
*password*
*.txt
# Exclude all binaries
*.bin
*.jar
*.tar
*.zip
*.gzip
*.tgz


@@ -0,0 +1,11 @@
---
- name: SSH - Restart Service
become: true
ansible.builtin.service:
daemon_reload: true
name: ssh
enabled: true
state: restarted


@@ -0,0 +1,60 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this is a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
# dependencies:
# - role: myInfra.journald
# - role: myInfra.docker.portainer
# - role: myInfra.docker.promtail
# - role: myInfra.docker.telegraf
# - role: myInfra.dev.homesrv01.core
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.


@@ -0,0 +1,30 @@
---
- name: Install/Update openssh-server
become: true
ansible.builtin.package:
name:
- openssh-server
state: latest
- name: SSH - Setup & Config
copy:
dest: /etc/ssh/sshd_config.d/10-myLimbo.conf
content: |
###################################################################
# maintained by myInfra.dev.homesrv01 ansible role
# https://git.limbosolutions.com/marcio.fernandes/homesrv01.dev.lan
####################################################################
PermitRootLogin no
PasswordAuthentication no
notify:
- SSH - Restart Service
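
Before the handler restarts sshd it is worth validating the rendered config, since a broken drop-in can lock you out; a minimal manual check (not part of the role as committed):

```bash
# validate the full sshd configuration, including the new drop-in
sshd -t
```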

ansible/site.yml (new file, +8)

@@ -0,0 +1,8 @@
- name: homesrv01 playbook
hosts: all
roles:
- myInfra.dev.homesrv01.core
- myInfra.ssh
- myInfra.dev.homesrv01


@@ -1,15 +0,0 @@
# to run on icarus
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: casa-limbosolutions-com
namespace: casa-limbosolutions-com
spec:
secretName: casa-limbosolutions-com-tls
dnsNames:
- "casa.limbosolutions.com"
- "*.casa.limbosolutions.com"
issuerRef:
kind: ClusterIssuer
name: letsencrypt-prod


@@ -1,101 +0,0 @@
{
"folders": [
{
"path": "./",
"name": "casa"
},
{
"path": "../homeAssistant"
},
{
"path": "../lms"
},
{
"path": "../mosquitto"
},
{
"name": "kb",
"path": "../kb"
},
{
"name": "pi.bluetooth.speaker",
"path": "../pi.bluetooth.speaker"
},
],
"settings": {
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true,
"**/Thumbs.db": true,
"kb": true,
"runme.taskProvider.enabled": false,
"runme.scanMode": "off"
},
"ansible.python.interpreterPath": "/bin/python",
"cSpell.words": [
"davidanson",
"eamodio",
"envsubst",
"lmscommunity",
"localtime",
"LOGLEVEL",
"lyrionmusicserver",
"mtxr",
"rclone",
"reverseproxy",
"rogalmic",
"runme",
"sqltools",
"yzhang"
],
"githubLocalActions.workflowsDirectory": ".gitea/workflows"
},
"tasks": {
"version": "2.0.0",
"tasks": [
{
"label": "Run current shell file - relative",
"type": "shell",
"command": "bash",
"args": [
"${file}"
],
"options": {
"cwd": "${fileDirname}"
},
"group": {
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared"
}
},
{
"label": "Run current shell file",
"type": "shell",
"command": "bash",
"args": [
"${file}"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared"
}
}
]
}
}


@@ -1,127 +0,0 @@
# Casa k3s cluster
**Table of Contents:**
- [Nodes](#nodes)
- [Master Node](#master-node)
- [worker node](#worker-node)
## Nodes
**Setup user for ssh access:**
``` bash
apt install sudo
usermod -aG sudo mf
```
**Disable swap:**
``` bash
swapoff -a
# Edit /etc/fstab and comment out any swap entries:
# /swapfile none swap sw 0 0
```
**Other Packages:**
``` bash
sudo apt update -y && sudo apt install -y curl btop
```
### Master Node
- debian 12
- 2g ram
- 24Gb disk
``` bash
ip a # check ethernet name
# removes the automatic configuration as dhcp client
sed -i '/ens18/d' /etc/network/interfaces
cat <<EOF > /etc/network/interfaces.d/ens18
# my network configuration
auto ens18
iface ens18 inet static
address 192.168.14.9/24
gateway 192.168.0.1
EOF
cat <<EOF > /etc/resolv.conf
domain dev.lan
search dev.lan. lan.
nameserver 192.168.14.1
EOF
```
``` bash
curl -sfL https://get.k3s.io | sh -
```
**Taint NoSchedule on master node:**
`kubectl taint nodes <master-node-name> node-role.kubernetes.io/control-plane=:NoSchedule`
``` bash
kubectl taint nodes casa node-role.kubernetes.io/control-plane=:NoSchedule
```
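
To confirm the taint took effect:

```bash
# the control-plane taint should now be listed on the master node
kubectl describe node casa | grep -i taint
```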
### worker node
- debian 12
- 4g ram
- 8Gb OS disk
- 16Gb data disk
``` bash
# execute on server to get token
cat /var/lib/rancher/k3s/server/node-token
```
**Setup worker node:**
``` bash
# install k3s as agent / worker node
TOKEN="???"
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --data-dir /dataDisk/k3s --server https://casa.dev.lan:6443 --token ${TOKEN}" sh -s -
```
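
Back on the master node, the new agent should appear shortly:

```bash
# verify the worker joined the cluster
kubectl get nodes -o wide
```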
**Move default k3s folder:**
``` bash
apt install rsync
sudo systemctl stop k3s-agent
sudo rsync -a /var/lib/rancher/k3s/ /dataDisk/k3s/
```
Re-run the default k3s install script with the `--data-dir /dataDisk/k3s` argument.
Then run `kubectl -n kube-system edit configmap local-path-config` to point the local-path provisioner storage path at the new location:
``` yaml
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/lib/rancher/k3s/storage"]
},
{
"node":"casa-minion-01",
"paths":["/dataDisk/k3s/storage"]
}
]
}
```
``` bash
rm -rf /var/lib/rancher/k3s
```
**Set node labels:**
``` bash
kubectl label node casa-minion-01 role=worker-node
```


@@ -1,9 +0,0 @@
# Deploy
```bash
set -a
source ./.env
set +a
envsubst < ./secrets.yaml | kubectl apply -f -
kubectl apply -f deploy.yaml
```


@@ -1,235 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: casa-vlan-cicd
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: casa-vlan-cicd
name: casa-vlan-cicd-runners
labels:
app: casa-vlan-cicd-runners
spec:
replicas: 1
selector:
matchLabels:
app: casa-vlan-cicd-runners
template:
metadata:
labels:
app: casa-vlan-cicd-runners
spec:
nodeSelector:
role: worker-node
containers:
- name: mylimbo-casa-vlan-cicd-runners
image: git.limbosolutions.com/kb/gitea/act_runner:0.2.13-network-stack
imagePullPolicy: Always
env:
- name: LOG_LEVEL
value: "trace"
- name: CONFIG_FILE
value: /config.yaml
- name: GITEA_INSTANCE_URL
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_INSTANCE_URL
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN
- name: GITEA_RUNNER_NAME
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_MYLIMBO_RUNNER_NAME
- name: GITEA_RUNNER_CAPACITY
value: "1"
- name: GITEA_RUNNER_EPHEMERAL
value: "0"
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "400m"
volumeMounts:
- name: config-map
mountPath: /config.yaml
subPath: config.yaml
- name: mf-casa-vlan-cicd-runners
image: git.limbosolutions.com/kb/gitea/act_runner:0.2.13-network-stack
imagePullPolicy: Always
env:
- name: LOG_LEVEL
value: "trace"
- name: CONFIG_FILE
value: /config.yaml
- name: GITEA_INSTANCE_URL
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_INSTANCE_URL
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_MF_RUNNER_REGISTRATION_TOKEN
- name: GITEA_RUNNER_NAME
valueFrom:
secretKeyRef:
name: casa-vlan-cicd-runners
key: GITEA_MF_RUNNER_NAME
- name: GITEA_RUNNER_CAPACITY
value: "1"
- name: GITEA_RUNNER_EPHEMERAL
value: "0"
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: " 512Mi"
cpu: "400m"
volumeMounts:
- name: config-map
mountPath: /config.yaml
subPath: config.yaml
volumes:
- name: config-map
configMap:
name: casa-vlan-cicd-runners
---
apiVersion: v1
kind: ConfigMap
metadata:
name: casa-vlan-cicd-runners
namespace: casa-vlan-cicd
data:
config.yaml: |
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
executor: shell
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# The timeout for the runner to wait for running jobs to finish when shutting down.
# Any running jobs that haven't finished after this timeout will be cancelled.
shutdown_timeout: 0s
# Whether to skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The github_mirror of a runner is used to specify the mirror address of the github that pulls the action repository.
# It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
# and github_mirror is not empty. In this case,
# it replaces https://github.com with the value here, which is useful for some special network environments.
github_mirror: ''
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "casa-vlan-cicd"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
# Always require a reachable docker daemon, even if not required by act_runner
require_docker: false
# Timeout to wait for the docker daemon to be reachable, if docker is required by require_docker or act_runner
docker_timeout: 0s
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: casa-vlan-cicd-runners
namespace: casa-vlan-cicd
stringData:
GITEA_INSTANCE_URL: ${GITEA_INSTANCE_URL}
GITEA_RUNNER_REGISTRATION_TOKEN: ${GITEA_RUNNER_REGISTRATION_TOKEN}
GITEA_MYLIMBO_RUNNER_NAME: ${GITEA_MYLIMBO_RUNNER_NAME}
GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN: ${GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN}
GITEA_MF_RUNNER_NAME: ${GITEA_MF_RUNNER_NAME}
GITEA_MF_RUNNER_REGISTRATION_TOKEN: ${GITEA_MF_RUNNER_REGISTRATION_TOKEN}


@@ -1,22 +0,0 @@
# monitoring
## namespace
``` bash
kubectl create namespace monitoring
```
## promtail
### setup
``` bash
# add repo
helm repo add grafana https://grafana.github.io/helm-charts
# Install & Upgrade
helm upgrade --install promtail grafana/promtail --namespace monitoring \
--values=./promtail/values.yaml \
--values=./promtail//values.local.yaml
```
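
A quick check that the chart rolled out (the label selector is the chart's standard one, assumed for this chart version):

```bash
# promtail runs as a daemonset; expect one pod per node
kubectl -n monitoring get pods -l app.kubernetes.io/name=promtail
```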
## Continuous Deploy


@@ -1,97 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: monitoring
name: ci-cd
rules:
- apiGroups: [""]
resources: ["pods", "services", "secrets", "configmaps", "persistentvolumeclaims", "endpoints", "serviceaccounts"]
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets","daemonsets"]
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- apiGroups: ["traefik.io"]
resources: ["ingressroutes"]
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles", "clusterrolebindings"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
# telegraf
- apiGroups: ["monitoring.coreos.com"]
resources: ["servicemonitors", "podmonitors", "prometheuses", "alertmanagers"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ci-cd
namespace: monitoring
subjects:
- kind: ServiceAccount
name: casa-ci-cd
namespace: home-assistant
roleRef:
kind: Role
name: ci-cd
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ci-cd-cluster
rules:
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles", "clusterrolebindings"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ci-cd-clusterbinding
subjects:
- kind: ServiceAccount
name: casa-ci-cd
namespace: home-assistant
roleRef:
kind: ClusterRole
name: ci-cd-cluster
apiGroup: rbac.authorization.k8s.io
---
# telegraf
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: kube-system
name: ci-cd-kube-system
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ci-cd-kube-system
namespace: kube-system
subjects:
- kind: ServiceAccount
name: casa-ci-cd
namespace: home-assistant
roleRef:
kind: Role
name: ci-cd-kube-system
apiGroup: rbac.authorization.k8s.io


@@ -1,34 +0,0 @@
# Prometheus Setup
- <https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack>
- <https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml>
## helm chart
```bash
#add repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
```
**This helm chart installs:**
- CRDs
- the Prometheus Operator
- kubernetes service monitors
```bash
kubectl get namespace monitoring || kubectl create namespace monitoring
helm upgrade --install prometheus-stack prometheus-community/kube-prometheus-stack \
--namespace monitoring \
--values=./helm/01-only-crd-and-operator.yaml \
--values=./helm/02-kube-metrics.yaml \
--values=./helm/03-node-exporter.yaml \
--values=./helm/04-kubelet.yaml \
--values=./helm/10-testing-values.yaml
```
## deploy prometheus agent
```bash
kubectl apply -f ./prometheus-agent.yaml
```
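
After applying, the operator reconciles the PrometheusAgent into a statefulset; a hedged check (the exact pod name prefix is the operator's convention, not guaranteed here):

```bash
# the agent pod should come up in the monitoring namespace
kubectl -n monitoring get pods | grep prometheus-agent
```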


@@ -1,55 +0,0 @@
# values.yaml to install only Prometheus Operator and CRDs
# Disable all components except the operator
defaultRules:
create: false
alertmanager:
enabled: false
grafana:
enabled: false
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
prometheus:
enabled: false
coreDns:
enabled: false
kubeControllerManager:
enabled: false
kubeEtcd:
enabled: false
kubeProxy:
enabled: false
kubeScheduler:
enabled: false
prometheusOperator:
enabled: true
createCustomResource: true
tls:
enabled: false
admissionWebhooks:
enabled: false
cleanupCustomResource: false
serviceMonitor:
selfMonitor: false
kubeletService:
enabled: true
# requires manual creation of service #prom-kubelet-service
nodeSelector:
role: worker-node
# global:
# nodeSelector:
# dedicated: worker-node


@@ -1,35 +0,0 @@
kubeStateMetrics:
enabled: true
kube-state-metrics: # ok tested!
podLabels:
role: worker-node
nodeSelector:
role: worker-node
prometheus:
monitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-kube-state-metrics # !important: selector used by agent
coreDns: # ok tested!
enabled: true
serviceMonitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-stack-coredns # !important: selector used by agent
kubeApiServer: # ok tested!
enabled: true
serviceMonitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-stack-apiserver # !important: selector used by agent


@@ -1,19 +0,0 @@
# Deploy node exporter as a daemonset to all nodes
nodeExporter:
enabled: true
# job node exporter
prometheus-node-exporter:
prometheus:
monitor:
enabled: true
relabelings:
# https://github.com/dotdc/grafana-dashboards-kubernetes
- action: replace
sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: nodename
# identification of cluster
- targetLabel: cluster
replacement: casa


@@ -1,77 +0,0 @@
# File used for testing new options and configurations
# Should be the last file to be loaded
kubelet:
enabled: true
namespace: kube-system
serviceMonitor:
interval: 30s #WARN: Error on ingesting out-of-order samples. https://github.com/prometheus-community/helm-charts/issues/5483
enabled: true
## Enable scraping /metrics from kubelet's service
kubelet: true
additionalLabels:
app.kubernetes.io/name: prometheus-kubelet # !important: selector used by agent
probesMetricRelabelings:
- targetLabel: cluster
replacement: casa
- sourceLabels: [__name__, image]
separator: ;
regex: container_([a-z_]+);
replacement: $1
action: drop
- sourceLabels: [__name__]
separator: ;
regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
replacement: $1
action: drop
# # RelabelConfigs to apply to samples before scraping
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
# #
# # metrics_path is required to match upstream rules and charts
cAdvisorRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: ^(.*)$
targetLabel: nodename
replacement: $1
action: replace
# # RelabelConfigs to apply to samples before scraping
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
# #
probesRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: ^(.*)$
targetLabel: nodename
replacement: $1
action: replace
resourceRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path


@@ -1 +0,0 @@
# use for testing

File diff suppressed because it is too large.


@@ -1,71 +0,0 @@
apiVersion: monitoring.coreos.com/v1alpha1
kind: PrometheusAgent
metadata:
name: prometheus-agent
namespace: monitoring
spec:
serviceMonitorNamespaceSelector: {}
podMonitorNamespaceSelector: {}
serviceMonitorSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: Exists
podMonitorSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: Exists
replicas: 1
remoteWrite:
- url: https://prometheus.monitoring.limbosolutions.com/api/v1/write
scrapeInterval: 60s
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 200m
memory: 256Mi
serviceAccountName: prometheus-agent
nodeSelector:
role: worker-node
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-agent
rules:
- apiGroups: [""]
resources: ["nodes", "nodes/metrics", "nodes/proxy", "services", "endpoints", "pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["monitoring.coreos.com"]
resources: ["servicemonitors", "podmonitors"]
verbs: ["get", "list", "watch"]
- nonResourceURLs:
- /metrics
- /metrics/cadvisor
- /metrics/probes
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus-agent-monitoring
roleRef:
kind: ClusterRole
name: prometheus-agent
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: prometheus-agent
namespace: monitoring
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-agent
namespace: monitoring


@@ -1 +0,0 @@
**.local.**


@@ -1,52 +0,0 @@
config:
clients:
- url: "????" #replaced values.local.yaml. Example: https://lokiserver/loki/api/v1/push
# by default all scrape configs had node_name
snippets:
extraRelabelConfigs:
- target_label: host
replacement: ${HOSTNAME}
- target_label: cluster
replacement: casa
extraScrapeConfigs: |
# scrape config for journald
- job_name: host-journald
journal:
json: true
max_age: 24h
path: /var/log/host/journal
labels:
job: journald
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: 'journal_systemd_unit'
- source_labels: ['__journal_syslog_identifier']
target_label: 'journal_syslog_identifier'
- source_labels: ['__journal__hostname']
target_label: 'journal_hostname'
- target_label: 'host'
replacement: '${HOSTNAME}'
- target_label: 'cluster'
replacement: 'casa'
extraArgs:
- --config.expand-env=true
extraVolumes:
- name: node-logs
hostPath:
path: /var/log
extraVolumeMounts:
- name: node-logs
mountPath: /var/log/host
readOnly: true
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 50Mi


@@ -0,0 +1,29 @@
version: '3'
services:
nginx:
build:
context: ./docker
pull_policy: build
image: homesrv/nginx:latest
volumes:
- etc_ssl:/etc/ssl
ports:
- 443:443
- 80:80
networks:
- public
- private
command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
restart: unless-stopped
volumes:
nginx-conf.d:
etc_ssl:
name: nginx_etc_ssl
networks:
public:
name: reverseproxy_public
external: true
private:
name: reverseproxy_private
external: true


@@ -0,0 +1,4 @@
FROM nginx:latest
COPY nginx.conf.d/* /etc/nginx/conf.d/
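
To validate the baked-in config before portainer deploys it, a local build plus nginx's own config test is a cheap sketch (image tag and build context taken from the compose file above):

```bash
# build the image and let nginx validate the copied config
docker build -t homesrv/nginx:latest ./docker
docker run --rm homesrv/nginx:latest nginx -t
```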


@@ -0,0 +1,44 @@
server {
listen 80;
server_name localhost;
#access_log /var/log/nginx/host.access.log main;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}


@@ -0,0 +1,60 @@
map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}

server {
    listen 80 default_server;
    proxy_buffering off;

    location / {
        proxy_pass http://homeassistant-app:80;
        proxy_set_header Host $host;
        proxy_http_version 1.1;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
    }

    location ^~ /code/ {
        proxy_pass http://code:8443/;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Url-Scheme $scheme;
        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

server {
    ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
    ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
    listen 443 ssl default_server;

    location ^~ /code/ {
        proxy_pass http://code:8443/;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Url-Scheme $scheme;
        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
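
The 443 server block expects a self-signed pair inside the `nginx_etc_ssl` volume. One way to seed it, sketched here with the `alpine/openssl` image (the image choice and certificate subject are assumptions, not part of the original):

```bash
# One-off container that writes the key pair into the shared volume.
docker run --rm -v nginx_etc_ssl:/etc/ssl --entrypoint sh alpine/openssl -c '
  mkdir -p /etc/ssl/certs /etc/ssl/private &&
  openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout /etc/ssl/private/nginx-selfsigned.key \
    -out /etc/ssl/certs/nginx-selfsigned.crt \
    -subj "/CN=localhost"'
```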

View File

@@ -0,0 +1,22 @@
server {
    listen 80;
    proxy_buffering off;
    server_name lms.lan;

    location / {
        proxy_pass http://lms-lms-1:9000/;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Url-Scheme $scheme;
        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

View File

@@ -0,0 +1,22 @@
server {
    listen 80;
    proxy_buffering off;
    server_name zigbee2mqtt.lan;

    location / {
        proxy_pass http://zigbee2mqtt:8080/;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Url-Scheme $scheme;
        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
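
Both the `lms.lan` and `zigbee2mqtt.lan` vhosts above rely on LAN DNS pointing those names at the proxy. A quick check that skips DNS entirely (the proxy IP here is a placeholder, not from the repo):

```bash
# Force zigbee2mqtt.lan to resolve to the proxy and hit the frontend.
curl --resolve zigbee2mqtt.lan:80:192.168.1.10 -i http://zigbee2mqtt.lan/
```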

View File

@@ -1,32 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: wyoming
  name: ci-cd
rules:
  - apiGroups: [""]
    resources: ["pods", "services", "secrets", "configmaps", "persistentvolumeclaims", "endpoints"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
  - apiGroups: ["networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
  - apiGroups: ["traefik.io"]
    resources: ["ingressroutes"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ci-cd
  namespace: wyoming
subjects:
  - kind: ServiceAccount
    name: casa-ci-cd
    namespace: home-assistant
roleRef:
  kind: Role
  name: ci-cd
  apiGroup: rbac.authorization.k8s.io
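
Since the RoleBinding grants the `casa-ci-cd` ServiceAccount from `home-assistant` access inside `wyoming`, impersonation is a quick way to confirm the grant before or after removing it:

```bash
# Should print "yes" while the Role and RoleBinding above exist.
kubectl auth can-i create deployments -n wyoming \
  --as=system:serviceaccount:home-assistant:casa-ci-cd
```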

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: wyoming

View File

@@ -1,148 +0,0 @@
---
# dns: whisper.wyoming.svc.cluster.local
apiVersion: v1
kind: Service
metadata:
  name: whisper
  namespace: wyoming
  labels:
    app: wyoming-whisper
spec:
  selector:
    app: wyoming-whisper
  ports:
    - name: tcp-whisper
      protocol: TCP
      port: 10300
      targetPort: 10300
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: wyoming
  name: wyoming-whisper
  labels:
    app: wyoming-whisper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wyoming-whisper
  template:
    metadata:
      labels:
        app: wyoming-whisper
    spec:
      nodeSelector:
        role: worker-node
      containers:
        - name: wyoming-whisper
          image: rhasspy/wyoming-whisper
          volumeMounts:
            - mountPath: /data
              name: data
          args:
            - --model
            - small-int8
            - --language
            - pt
            - --beam-size
            - "4"
            - --debug
          ports:
            - name: tcp-whisper
              containerPort: 10300
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: wyoming-whisper
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wyoming-whisper
  namespace: wyoming
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
---
# dns: piper.wyoming.svc.cluster.local
apiVersion: v1
kind: Service
metadata:
  name: piper
  namespace: wyoming
  labels:
    app: wyoming-piper
spec:
  selector:
    app: wyoming-piper
  ports:
    - name: tcp-piper
      protocol: TCP
      port: 10200
      targetPort: 10200
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: wyoming
  name: wyoming-piper
  labels:
    app: wyoming-piper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wyoming-piper
  template:
    metadata:
      labels:
        app: wyoming-piper
    spec:
      nodeSelector:
        role: worker-node
      containers:
        - name: wyoming-piper
          image: rhasspy/wyoming-piper
          volumeMounts:
            - mountPath: /data
              name: data
          args:
            - --voice
            - en-gb-southern_english_female-low
          ports:
            - containerPort: 10200
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: wyoming-piper
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wyoming-piper
  namespace: wyoming
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
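
Both Services are ClusterIP-only, so a port-forward is the simplest way to reach them from outside the cluster for a smoke test:

```bash
# Expose the whisper Service locally on the Wyoming port.
kubectl -n wyoming port-forward svc/whisper 10300:10300
```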

View File

@@ -0,0 +1,25 @@
version: '3.8'
services:
  whisper:
    image: rhasspy/wyoming-whisper
    restart: unless-stopped
    volumes:
      - whisper_data:/data
    ports:
      - 10300:10300
    environment:
      - TZ=Europe/Lisbon
    command: [ "--model", "medium-int8", "--language", "en" ]
  piper:
    image: rhasspy/wyoming-piper
    restart: unless-stopped
    volumes:
      - piper_data:/data
    ports:
      - 10200:10200
    environment:
      - TZ=Europe/Lisbon
    command: [ "--voice", "en-gb-southern_english_female-low" ]
volumes:
  whisper_data:
  piper_data:
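
In the Compose variant the Wyoming ports are published on the host, so a rough liveness check is to open the TCP port directly. The `describe` event below is my understanding of the Wyoming JSONL handshake, not something taken from this repo:

```bash
# Ask whisper to describe itself; a JSON "info" event in reply indicates it is up.
echo '{"type": "describe"}' | nc -w 2 localhost 10300
```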

View File

@@ -1,65 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zigbee2mqtt-data
  namespace: mqtt
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: local-path
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zigbee2mqtt
  namespace: mqtt
  labels:
    app: zigbee2mqtt
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zigbee2mqtt
  template:
    metadata:
      labels:
        app: zigbee2mqtt
    spec:
      containers:
        - name: zigbee2mqtt
          securityContext:
            privileged: true
          image: koenkk/zigbee2mqtt
          volumeMounts:
            - name: zigbee2mqtt-data
              mountPath: /app/data
            - name: usb-device
              mountPath: /dev/ttyUSB0
            - name: run-udev
              mountPath: /run/udev  # mountPath must be an absolute path
          resources:
            requests:
              memory: "50Mi"
              cpu: "100m"
            limits:
              memory: "100Mi"
              cpu: "200m"
      volumes:
        - name: usb-device
          hostPath:
            path: /dev/ttyUSB0
            type: CharDevice
        - name: run-udev
          hostPath:
            path: /run/udev
        - name: zigbee2mqtt-data
          persistentVolumeClaim:
            claimName: zigbee2mqtt-data
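
Because the StatefulSet mounts `/dev/ttyUSB0` via hostPath, the pod only works on the node where the Zigbee adapter is plugged in, and nothing in the manifest pins it there. A hedged sketch of one way to do that (the hostname value is hypothetical):

```yaml
# Hypothetical addition under spec.template.spec to pin the pod to the adapter's node.
nodeSelector:
  kubernetes.io/hostname: node-with-zigbee-adapter
```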

View File

@@ -0,0 +1,28 @@
version: '3.8'
services:
  zigbee2mqtt:
    container_name: zigbee2mqtt
    image: koenkk/zigbee2mqtt
    restart: unless-stopped
    volumes:
      - data:/app/data
      - /run/udev:/run/udev:ro
    #ports:
      # Frontend port
      #- 8085:8080
    environment:
      - TZ=Europe/Lisbon
    devices:
      # Make sure this matches your adapter location
      - /dev/ttyUSB0:/dev/ttyUSB0
    networks:
      zigbee2mqtt:
      reverseproxy_public:
volumes:
  data:
networks:
  zigbee2mqtt:
  reverseproxy_public:
    external: true
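
If `/dev/ttyUSB0` does not match your setup, the stable by-id path is usually the safer device mapping; listing it on the host shows what the adapter is actually called:

```bash
# Stable device names survive re-enumeration across reboots.
ls -l /dev/serial/by-id/
```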

View File

@@ -1,7 +0,0 @@
# storage-limbosolutions-com
<https://github.com/seaweedfs/seaweedfs-csi-driver>
```bash {cwd=../}
./storage-limbosolutions-com/ops-scripts/apply.sh
```

View File

@@ -1,49 +0,0 @@
# host and port of your SeaweedFS filer
seaweedfsFiler: "SEAWEEDFS_FILER:8888"
storageClassName: ""
storageClassVolumeBindingMode: Immediate
isDefaultStorageClass: false
tlsSecret: ""
imagePullPolicy: "Always" # "Always"
driverName: storage-limbosolutions-com-csi-driver
controller:
  # nodeSelector:
  #   role: worker-node
  resources: {}
  livenessProbe:
    failureThreshold:
    initialDelaySeconds: 10
    timeoutSeconds: 3
    periodSeconds: 60
# DataLocality (inspired by Longhorn) tells the storage driver which volume locations Pods should use or prefer for reads and writes,
# e.g. letting Pods preferably write to volume servers in their local dataCenter.
# Requires volume servers to be correctly labelled and matching topology info to be passed into the seaweedfs-csi-driver node.
# Example value: "write_preferlocaldc"
dataLocality: "none"
node:
  # Deploy node daemonset
  enabled: true
  # When the seaweedfs-csi-driver-node pod on a node is recreated, all pods on that node using a seaweed-csi PV stop working.
  # For a safe update, set updateStrategy.type: OnDelete, manually move the pods that use seaweed-csi PVs, then delete the seaweedfs-csi-driver-node daemonset pod.
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
  affinity: {}
  # don't set tolerations, so the driver is not installed on control-plane/master nodes
  # tolerations: # dedicated=reserved:NoSchedule
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "reserved"
  #     effect: "NoSchedule"
  livenessProbe:
    failureThreshold:
    initialDelaySeconds: 10
    timeoutSeconds: 3
    periodSeconds: 60
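
The safe-update comment above describes the procedure in prose; roughly, it translates to something like the following (the DaemonSet and pod names are placeholders):

```bash
# Switch the node DaemonSet to manual rollout.
kubectl -n storage-limbosolutions-com patch daemonset <csi-driver-node-ds> \
  -p '{"spec":{"updateStrategy":{"type":"OnDelete"}}}'
# Move or stop the pods using seaweed-csi PVs on a node, then roll that node's pod:
kubectl -n storage-limbosolutions-com delete pod <csi-driver-node-pod>
```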

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
generatorOptions:
  disableNameSuffixHash: true

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: storage-limbosolutions-com
  labels:
    name: storage-limbosolutions-com

View File

@@ -1,14 +0,0 @@
#!/bin/bash
set -e
kubectl kustomize ./storage-limbosolutions-com/deploy/infra | kubectl apply -f -
helm repo add seaweedfs-csi-driver https://seaweedfs.github.io/seaweedfs-csi-driver/helm
helm repo update seaweedfs-csi-driver
helm upgrade --install \
  --set seaweedfsFiler=192.168.14.32:7888 \
  --set storageClassName=storage-limbosolutions-com \
  --set driverName=storage-limbosolutions-com-seaweedfs-csi \
  --values=./storage-limbosolutions-com/deploy/helm/values.yaml \
  storage-limbosolutions-com-seaweedfs-csi-driver seaweedfs-csi-driver/seaweedfs-csi-driver --namespace storage-limbosolutions-com
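
After `apply.sh` runs, two quick checks confirm the chart landed and the StorageClass is registered under the overridden name:

```bash
kubectl get storageclass storage-limbosolutions-com
kubectl -n storage-limbosolutions-com get pods
```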

View File

@@ -0,0 +1,8 @@
docker image pull git.limbosolutions.com/kb/ansible
docker run --rm \
  -v ${PWD}/scripts:/scripts \
  -v ${PWD}/workspace:/workspace \
  -v ${MY_ANSIBLE_PRIVATE_KEY_FILE}/id_ed25519:/data/ansible_private_key \
  git.limbosolutions.com/kb/ansible \
  bash /scripts/run.sh

View File

@@ -0,0 +1,11 @@
#!/bin/bash
source /scripts/.env.local
# quote the variable so the key's newlines survive
echo "$ANSIBLE_PRIVATE_KEY" > /root/ansible_private_key
chmod 600 /root/ansible_private_key
cd /workspace
export ANSIBLE_HOST_KEY_CHECKING=False
$ANSIBLE_COMMAND
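
run.sh sources `/scripts/.env.local`, which is not in the repo. A hypothetical shape for it, matching the two variables the script actually reads (the key path comes from the docker wrapper's mount; the command value is illustrative):

```bash
# Hypothetical /scripts/.env.local
ANSIBLE_PRIVATE_KEY="$(cat /data/ansible_private_key)"
ANSIBLE_COMMAND='ansible-playbook -i inventory.yaml playbook.yaml --private-key /root/ansible_private_key'
```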

View File

@@ -0,0 +1,9 @@
- name: Homesrv01 playbook
  hosts: all
  tasks:
    - name: Ping my hosts
      ansible.builtin.ping:
    - name: Print message
      ansible.builtin.debug:
        msg: Hello world