feat: migrated to k3s

2025-11-15 11:54:06 +00:00
parent a595747c2c
commit 709d6de566
34 changed files with 5903 additions and 802 deletions

View File

@@ -5,9 +5,7 @@
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/base:jammy",
"features": {
"ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {},
"ghcr.io/devcontainers-extra/features/ansible": {},
"ghcr.io/devcontainers/features/docker-outside-of-docker": {},
"ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {}
},
// "features": {},
@@ -32,7 +30,6 @@
"vscode": {
"extensions": [
"ms-kubernetes-tools.vscode-kubernetes-tools",
"redhat.ansible",
"mtxr.sqltools-driver-mysql",
"stateful.runme",
"yzhang.markdown-all-in-one",

View File

@@ -1,7 +1,5 @@
#setup chroma for zsh colorize
chmod +x /home/vscode/lib/chroma
curl https://rclone.org/install.sh | sudo bash
docker context create casa-prod --description "casa prod context" --docker host=ssh://admin@homesrv01.dev.lan
docker context use casa-prod

View File

@@ -1,21 +0,0 @@
on:
push:
schedule:
- cron: '0 16 * * *' # Every day at 16:00
jobs:
deploy-to-homesrv01:
runs-on: "myLimbo-casa-gitea-act-runner"
steps:
- name: Checkout code
uses: actions/checkout@v2
# all certs and key are base64 encoded
- name: docker compose up
env:
KUBERNETES_SERVER: ${{ secrets.KUBERNETES_SERVER }}
KUBERNETES_CLIENT_CRT_BASE64: ${{ secrets.KUBERNETES_CLIENT_CRT_BASE64 }}
KUBERNETES_CLIENT_KEY_BASE64: ${{ secrets.KUBERNETES_CLIENT_KEY_BASE64 }}
KUBERNETES_CRT_AUTHORITY_BASE64: ${{ secrets.KUBERNETES_CRT_AUTHORITY_BASE64 }}
run: |
docker compose -f ./casa-limbosolutions-com/sync-certs-job/docker-compose.yaml up -d --pull always

View File

@@ -1,13 +0,0 @@
on:
push:
schedule:
- cron: '0 5 * * SUN' # Every Sunday at 05:00
jobs:
deploy-to-homesrv01:
runs-on: "myLimbo-casa-gitea-act-runner"
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: docker compose up
run: docker compose -f ./services/wyoming/docker-compose.yaml up -d --pull always

1
.gitignore vendored
View File

@@ -11,3 +11,4 @@ gitignore/*
.secrets
ansible/inventory.yml
.env.d/*
.tmp/**

191
README.md
View File

@@ -3,85 +3,38 @@
Welcome to my home server repository, where Home Assistant and other services are hosted.
This repository is dedicated to documenting and maintaining the server configuration and workflows.
The primary goal is to establish a Docker-based server capable of isolating services and communications related to home automation at the network level.
The primary goal is to establish a k3s cluster capable of isolating services and communications related to home automation at the network level.
The server operates within its own VLAN (`homesrv`) but requires controlled communication with other VLANs, such as `IOT Vlan`
<!-- omit in toc -->
## Table of Contents
**Table of Contents:**
- [Host](#host)
- [Proxmox - container](#proxmox---container)
- [OS](#os)
- [logs](#logs)
- [Host Setup](#host-setup)
- [Home Assistant](#home-assistant)
- [Lyrion Music Server (LMS)](#lyrion-music-server-lms)
- [Mosquitto](#mosquitto)
- [Wyoming](#wyoming)
- [Zigbee2mqtt](#zigbee2mqtt)
- [Development, Maintenance and Deployment](#development-maintenance-and-deployment)
- [Docker context](#docker-context)
### Setup system
## Host Setup
**Install/Update requirements (collections and roles):**
For more information on Host Setup, check the [readme](.docs/master-node.md).
``` bash
ansible-galaxy collection install -r ./ansible/requirements.yml --force
```
``` bash
ansible-playbook ./ansible/site.yml
```
docker, promtail and telegraf configuration is [maintained by an ansible playbook](./site.yml).
### docker
#### rclone plugin
[https://rclone.org/docker/](https://rclone.org/docker/)
```bash
# execute on server
sudo apt-get -y install fuse
docker plugin install rclone/docker-volume-rclone:amd64 args="-v" --alias rclone --grant-all-permissions
docker plugin list
```
If an error occurs when enabling the plugin,
*"rclone.sock: connect: no such file or directory"*
remove the existing cache:
```bash
rm -r /var/lib/docker-plugins/rclone/cache
mkdir -p /var/lib/docker-plugins/rclone/cache
```
[ansible role for plugin configuration](./rclone.docker-plugin.playbook.yaml)
### nginx
[Docker Compose](./services/nginx/docker-compose.yaml)
All site configurations are set during the docker build.
### Home Assistant
## Home Assistant
[Git Repo](/:root/marcio.fernandes/homeAssistant)
### Lyrion Music Server (LMS)
## Lyrion Music Server (LMS)
For instructions on setting up the Lyrion Music Server Docker container, refer to the [LMS Git Repository](/:root/marcio.fernandes/lms).
For information on integrating Lyrion Music Server with Home Assistant, visit the [Home Assistant Git Repository](/:root/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server).
Using [Docker Rclone plugin](https://rclone.org/docker/) for accessing the bucket where music is stored. Configuration is managed via [Ansible playbook](./rclone.docker-plugin.playbook.yml).
```sh
#configure access to s3 bucket
ansible-playbook ./rclone.docker-plugin.playbook.yml
```
### Mosquitto
## Mosquitto
[Git Repo](/:root/marcio.fernandes/mosquitto)
### Wyoming
## Wyoming
A peer-to-peer protocol for voice assistants (basically JSONL + PCM audio)
@@ -97,10 +50,6 @@ This is an open standard of the Open Home Foundation.
For more information about home assistant integration [check home assistant repo](/:root/marcio.fernandes/homeassistant#wyoming).
[Docker compose file](./services/wyoming/docker-compose.yaml).
Continuous deploy [gitea action](.gitea/workflows/deploy-wyoming.yml).
For performance reasons, wyoming whisper is currently hosted on the chimera kubernetes cluster: [deployment](./services/wyoming/whisper.kubernetes-deployment.yaml)
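For local testing it can also be run straight from docker; a minimal sketch based on the upstream wyoming-whisper image docs (image tag, port and flags are the usual defaults, not values taken from this repo):

```bash
# run wyoming-whisper locally; model, language and port are assumptions
docker run -d --name wyoming-whisper \
  -p 10300:10300 \
  -v whisper-data:/data \
  rhasspy/wyoming-whisper \
  --model tiny-int8 --language en
```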
Links:
@@ -110,118 +59,12 @@ Links:
- [https://exitcode0.net/posts/wyoming-whisper-docker-compose/](https://exitcode0.net/posts/wyoming-whisper-docker-compose/)
- [https://exitcode0.net/posts/wyoming-piper-docker-compose/](https://exitcode0.net/posts/wyoming-piper-docker-compose/)
### Zigbee2mqtt
## Zigbee2mqtt
Zigbee to MQTT bridge, get rid of your proprietary Zigbee bridges
SONOFF Universal Zigbee 3.0 USB Dongle Plus is attached to the [proxmox host](#proxmox---lxc-container).
Patch device permissions on the [proxmox host](#proxmox---lxc-container)
(usb passthrough to the [lxc container](#proxmox---lxc-container))
```bash
#on proxmox hosting server
chown 100000:100020 /dev/ttyUSB0
chown 100000:100020 /dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0
```
[docker compose](./services/zigbee2mqtt/docker-compose.yaml)
Links
- [https://www.zigbee2mqtt.io/](https://www.zigbee2mqtt.io/)
- [Home assistant integration](/:root/marcio.fernandes/homeassistant#Zigbee2mqtt)
- [Continuous Deploy - git action](./.gitea/workflows/services.zigbee2mqtt.yml)
## Host
### Proxmox - container
Currently hosted in a Proxmox Ubuntu LXC container.
```bash
# cat /etc/pve/lxc/105.conf
arch: amd64
cmode: shell
cores: 2
features: fuse=1,keyctl=1,nesting=1
hostname: homesrv01
memory: 1500
net0: name=eth0,bridge=vmbr0,firewall=1,ip6=dhcp,...,type=veth
onboot: 1
ostype: ubuntu
protection: 1
rootfs: local-lvm:vm-105-disk-0,size=32G
swap: 1500
unprivileged: 1
lxc.cgroup2.devices.allow: c 189:* rwm
lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
lxc.cgroup2.devices.allow: c 188:* rwm
lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
```
Identifying the values for `lxc.cgroup2.devices.allow` and `lxc.mount.entry`:
```bash
# identify the usb dongle
lsusb
# example
# Bus 001 Device 008: ID 10c4:ea60 Silicon Labs CP210x UART Bridge
#replace with bus and device id
ls -l /dev/bus/usb/001/008
#example result
# crw-rw-r-- 1 root root 189, 7 May 17 15:56 /dev/bus/usb/001/008
# so
#lxc.cgroup2.devices.allow: c 189:* rwm
#lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
ls -l /dev/serial/by-id/
# example result
#lrwxrwxrwx 1 root root 13 May 17 15:56 usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 -> ../../ttyUSB0
ls -l /dev/ttyUSB0
#example result
#crw-rw---- 1 root dialout 188, 0 May 17 15:56 /dev/ttyUSB0
#so
#lxc.cgroup2.devices.allow: c 188:* rwm
#lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
```
### OS
```bash
# lsb_release -a
Distributor ID: Ubuntu
Description: Ubuntu 24.04 LTS
Release: 24.04
Codename: noble
# uname -r
6.8.4-3-pve
```
### logs
```bash
# check auto update scripts logs
journalctl -r -t auto-update
```
Attach the SONOFF Universal Zigbee 3.0 USB Dongle Plus to the Proxmox node and configure USB passthrough so the VM can use it.
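A minimal sketch of that passthrough from the Proxmox node, assuming VM ID 105 and the vendor:product ID reported by `lsusb` above (both values are assumptions):

```bash
# pass the dongle through to the VM by vendor:product id
qm set 105 -usb0 host=10c4:ea60
# inside the VM, confirm the device is visible
lsusb
ls -l /dev/serial/by-id/
```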
## Development, Maintenance and Deployment
Using Visual Studio Code, Docker, Ansible and Gitea Actions.
### Docker context
```bash
# create context to homesrv01 docker on development devices
docker context create homesrv01 --docker host=ssh://admin@homesrv01.dev.lan
```
Using Visual Studio Code and a dev container.

View File

@@ -1,10 +0,0 @@
[defaults]
inventory = ansible/inventory.yml
# Chick-Fil-A would like a word...
nocows = True
# Installs collections into [current dir]/ansible_collections/namespace/collection_name
collections_paths = ./ansible/collections
# Installs roles into [current dir]/roles/namespace.rolename
roles_path = ./ansible/roles

View File

@@ -1,40 +0,0 @@
- name: Setup rclone docker plugin
become: true
## vars:
# rclone_docker_plugin_config: |
# [s3-limbo-storage]
# type = s3
# provider = SeaweedFS
# access_key_id = !!! SET ON INVENTORY !!!
# secret_access_key = !!! SET ON INVENTORY !!!
# endpoint = !!! SET ON INVENTORY !!!
hosts:
- casa
tasks:
- name: Enforce folders
ansible.builtin.file:
path: /var/lib/docker-plugins/rclone/config
state: directory
owner: root
group: root
mode: u=rwx,g=r,o-rwx
recurse: true
- name: Enforce folders
ansible.builtin.file:
path: /var/lib/docker-plugins/rclone/cache
state: directory
owner: root
group: root
mode: u=rwx,g=r,o-rwx
recurse: true
- name: Setup rclone s3
ansible.builtin.copy:
dest: /var/lib/docker-plugins/rclone/config/rclone.conf
owner: root
group: root
mode: u=rwx,g-rwx,o-rwx
content: "{{ rclone_docker_plugin_config }}"

View File

@@ -1,4 +0,0 @@
collections:
- name: mylimbo.globals
source: ssh://git@git.limbosolutions.com:2222/myLimbo/ansible.collection.globals.git
type: git

View File

@@ -1,20 +0,0 @@
- name: Setup system
hosts:
- casa
roles:
- role: mylimbo.globals.docker
- role: mylimbo.globals.docker_loki_client
vars:
docker_loki_client_config:
hostname: "{{ inventory_hostname }}"
loki:
address: "{{ loki_address }}"
- role: mylimbo.globals.docker_telegraf
vars:
hostname: "{{ telegraf_hostname }}"
influxdb2:
org: "{{ telegraf_influxdb_org }}"
url: "{{ telegraf_influxdb_url }}"
token: "{{ telegraf_influxdb_token }}"
bucket: "{{ telegraf_influxdb_bucket }}"

View File

@@ -1,29 +0,0 @@
# casa.limbosolutions.com at icarus
Use the icarus cluster context for all documentation and scripts in this folder. [Check Instructions](#icarus-cluster---access) for how to set up the required user and roles on icarus and the client kubeconfig.
## certificates (wildcard)
```bash
kubectl apply -f ./certs.yaml
```
```bash
#check certificates
kubectl get cert -n casa-limbosolutions-com
```
## Icarus cluster - access
On the user's computer.
*Access to the k3s context is not required.*
```bash
# create private key
openssl genrsa -out ../../.env.d/kube/casa@icarus-user.key 2048
# create csr
openssl req -new -key ../../.env.d/kube/casa@icarus-user.key -out ../../.env.d/kube/casa@icarus-user.csr -subj "/CN=casa/O=limbosolutions"
```
Follow the instructions to [set up the user and roles on the icarus k3s cluster](./k3s-admin.md), then set up the kubectl config: [kube config](./k3s-kubctl-config.md).
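The kubectl config part boils down to registering the cluster, the user key/certificate generated above, and a context; a sketch with placeholder server URL and paths (see [kube config](./k3s-kubctl-config.md) for the real values):

```bash
# register cluster, user credentials and context on the dev device (placeholder values)
kubectl config set-cluster icarus --server=https://icarus.dev.lan:6443 \
  --certificate-authority=../../.env.d/kube/icarus-ca.crt --embed-certs=true
kubectl config set-credentials casa@icarus \
  --client-certificate=../../.env.d/kube/casa@icarus-user.crt \
  --client-key=../../.env.d/kube/casa@icarus-user.key --embed-certs=true
kubectl config set-context casa@icarus --cluster=icarus --user=casa@icarus \
  --namespace=casa-limbosolutions-com
kubectl config use-context casa@icarus
```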

View File

@@ -1,14 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: casa-limbosolutions-com
namespace: casa-limbosolutions-com
spec:
secretName: casa-limbosolutions-com-tls
dnsNames:
- "casa.limbosolutions.com"
- "*.casa.limbosolutions.com"
issuerRef:
kind: ClusterIssuer
name: letsencrypt-prod

View File

@@ -1,95 +0,0 @@
# casa on Icarus - admin
Requires Kubernetes admin access to icarus. All documentation and scripts must be executed in the icarus context with an admin account.
Currently using a symbolic link from the icarus project on my dev device to this file.
## Kubernetes Namespace
```bash
# create namespace
kubectl create namespace casa-limbosolutions-com
```
```bash
# delete namespace
kubectl delete namespace casa-limbosolutions-com
```
## Roles and Bindings
``` yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: casa-limbosolutions-com
name: casa-limbosolutions-com
rules:
- apiGroups:
- ""
- cert-manager.io # to access deployments certs from cert-manager
- apps # to access deployments
- networking.k8s.io # to access ingresses
resources:
- pods
- services
- secrets
- certificates
- deployments
- configmaps
- ingresses
- persistentvolumeclaims
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- deletecollection
```
``` yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: casa-limbosolutions-com-rb # Replace with your role binding name
namespace: casa-limbosolutions-com # Replace with your namespace
subjects:
- kind: User # or "ServiceAccount" for service accounts
name: casa # Replace with the username or service account name
apiGroup: rbac.authorization.k8s.io
namespace: casa-limbosolutions-com
roleRef:
kind: ClusterRole
name: casa-limbosolutions-com # The name of the role you created
apiGroup: rbac.authorization.k8s.io
```
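Both manifests above are applied from the admin context; a sketch, assuming they are saved to a local file (hypothetical file name):

```bash
# apply the ClusterRole and ClusterRoleBinding
kubectl apply -f ./casa-limbosolutions-com-rbac.yaml
kubectl get clusterrole,clusterrolebinding | grep casa-limbosolutions-com
```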
### Kubernetes User
```bash
#Deploy csr to k3s
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: casa-user-csr
spec:
request: $(cat ../.env.d/.kube/casa@icarus-user.csr | base64 | tr -d '\n')
signerName: kubernetes.io/kube-apiserver-client
usages:
- client auth
EOF
```
```bash
# Approve csr
kubectl certificate approve casa-user-csr
```
```bash
# Download the kubernetes user crt
kubectl get csr casa-user-csr -o jsonpath='{.status.certificate}' | base64 --decode > ./.env.d/casa@icarus-user.crt
```
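As a sanity check, the admin account can impersonate the new user to confirm the role binding works:

```bash
# verify the permissions granted to the casa user
kubectl auth can-i list pods --as=casa -n casa-limbosolutions-com
kubectl auth can-i get secrets --as=casa -n casa-limbosolutions-com
```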

View File

@@ -1,74 +0,0 @@
configs:
sync-certs-sh:
content: |
#!/bin/bash
##############################################################################################################################
set -e
mkdir -p /tmp/.kube
echo "Trace: Setup kube"
echo "Trace: Processing KUBERNETES_CRT_AUTHORITY_BASE64"
base64 -d <<< "${KUBERNETES_CRT_AUTHORIT_BASE64}" > "$${KUBERNETES_CRT_AUTHORITY}"
echo "Trace: Processing KUBERNETES_CLIENT_CRT_BASE64"
base64 -d <<< "${KUBERNETES_CLIENT_CRT_BASE64}" > "$${KUBERNETES_CLIENT_CRT}"
echo "Trace: Processing KUBERNETES_CLIENT_KEY_BASE64"
base64 -d <<< "${KUBERNETES_CLIENT_KEY_BASE64}" > "$${KUBERNETES_CLIENT_KEY}"
# while true ; do
# sleep 5
# done
echo "Trace: Fetching secrets"
CERT_NAMES=$(kubectl get secrets \
-n casa-limbosolutions-com \
--server="$${KUBERNETES_SERVER}" \
--client-key="$${KUBERNETES_CLIENT_KEY}" \
--client-certificate="$${KUBERNETES_CLIENT_CRT}" \
--insecure-skip-tls-verify \
-o json | jq -r '.items[].metadata.name')
for CERT_NAME in $$CERT_NAMES; do
echo "Trace: Syncing certificate: $$CERT_NAME"
kubectl get secret "$$CERT_NAME" \
-n casa-limbosolutions-com \
--server="$${KUBERNETES_SERVER}" \
--client-key="$${KUBERNETES_CLIENT_KEY}" \
--client-certificate="$${KUBERNETES_CLIENT_CRT}" \
--insecure-skip-tls-verify \
-o json | \
jq -r '.data | to_entries[] | "\(.key) \(.value)"' | \
while IFS=' ' read -r KEY VALUE; do
echo "Processing key: $$KEY"
# Decode the base64 value and save it to the appropriate file
echo "Trace: Saving key: /etc/ssl/certs/casa-limbosolutions-com-certs/$${CERT_NAME}_$${KEY}"
echo "$$VALUE" | base64 -d > "/etc/ssl/certs/casa-limbosolutions-com-certs/$${CERT_NAME}_$${KEY}"
done
done
echo "Info: Certificates synced successfully."
services:
kubectl:
image: bitnami/kubectl:latest
environment:
KUBERNETES_SERVER: ${KUBERNETES_SERVER}
KUBERNETES_CRT_AUTHORITY: /tmp/.kube/ca.crt
KUBERNETES_CLIENT_CRT: /tmp/.kube/client.crt
KUBERNETES_CLIENT_KEY: /tmp/.kube/client.key
container_name: sync-certs-job
entrypoint: bash -c /app/sync-certs.sh
configs:
- source: sync-certs-sh
target: /app/sync-certs.sh
mode: 0755
volumes:
- casa-certs:/etc/ssl/certs/casa-limbosolutions-com-certs:rw
volumes:
casa-certs:
name: casa-limbosolutions-com-certs
external: true # Attention: permissions must be set to 1001:1001 (using chown in the nginx container command)

59
docs/master-node.md Normal file
View File

@@ -0,0 +1,59 @@
# Master Node
- Debian 12
- 2 GB RAM
- 24 GB disk
**Table of Contents:**
- [Host Setup](#host-setup)
## Host Setup
``` bash
ip a # check ethernet name
# remove automatic configuration as dhcp client
sed -i '/ens18/d' /etc/network/interfaces
cat <<EOF > /etc/network/interfaces.d/ens18
# my network configuration
auto ens18
iface ens18 inet static
address 192.168.14.9/24
gateway 192.168.0.1
EOF
cat <<EOF > /etc/resolv.conf
domain dev.lan
search dev.lan. lan.
nameserver 192.168.14.1
EOF
```
**Setup user for ssh access:**
``` bash
apt install sudo
usermod -aG sudo mf
```
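To log in over ssh without a password, copy a public key for that user; a sketch using the user and static address configured above, run from the dev device:

```bash
ssh-copy-id mf@192.168.14.9
```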
**Disable swap:**
``` bash
swapoff -a
# Edit /etc/fstab and comment out any swap entries, e.g.:
# /swapfile none swap sw 0 0
```
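A non-interactive way to comment out the swap entries, as a sketch (review `/etc/fstab` afterwards):

```bash
# comment out every uncommented swap line and confirm swap is off
sudo sed -ri 's/^([^#].*\bswap\b.*)$/# \1/' /etc/fstab
swapon --show
```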
**Other Packages:**
``` bash
sudo apt update -y && sudo apt install curl btop -y
```
``` bash
curl -sfL https://get.k3s.io | sh -
```
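k3s bundles kubectl and writes the admin kubeconfig to `/etc/rancher/k3s/k3s.yaml`; a quick check after the install:

```bash
# verify the node and core workloads are up
sudo k3s kubectl get nodes -o wide
sudo k3s kubectl get pods -A
```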

View File

@@ -0,0 +1,34 @@
# Prometheus Setup
- <https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack>
- <https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml>
## helm chart
```bash
#add repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
```
**This helm chart installs:**
- CRDs
- the Prometheus Operator
- Kubernetes service monitors
```bash
kubectl get namespace monitoring || kubectl create namespace monitoring
helm upgrade --install prometheus-stack prometheus-community/kube-prometheus-stack \
--namespace monitoring \
--values=./helm/01-only-crd-and-operator.yaml \
--values=./helm/02-kube-metrics.yaml \
--values=./helm/03-node-exporter.yaml \
--values=./helm/04-kubelet.yaml \
--values=./helm/10-testing-values.yaml
```
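After the install, the operator pod and the chart's CRDs should be present:

```bash
kubectl get pods -n monitoring
kubectl get crds | grep monitoring.coreos.com
```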
## deploy prometheus agent
```bash
kubectl apply -f ./prometheus-agent.yaml
```
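The operator reconciles the `PrometheusAgent` resource into a running pod:

```bash
kubectl get prometheusagents -n monitoring
kubectl get pods -n monitoring
```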

View File

@@ -0,0 +1,48 @@
# values.yaml to install only Prometheus Operator and CRDs
# Disable all components except the operator
defaultRules:
create: false
alertmanager:
enabled: false
grafana:
enabled: false
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
prometheus:
enabled: false
coreDns:
enabled: false
kubeControllerManager:
enabled: false
kubeEtcd:
enabled: false
kubeProxy:
enabled: false
kubeScheduler:
enabled: false
prometheusOperator:
enabled: true
createCustomResource: true
tls:
enabled: false
admissionWebhooks:
enabled: false
cleanupCustomResource: false
serviceMonitor:
selfMonitor: false
kubeletService:
enabled: false

View File

@@ -0,0 +1,29 @@
kubeStateMetrics:
enabled: true
kube-state-metrics: # ok tested!
prometheus:
monitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-kube-state-metrics # !important: selector used by agent
coreDns: # ok tested!
enabled: true
serviceMonitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-stack-coredns # !important: selector used by agent
kubeApiServer: # ok tested!
enabled: true
serviceMonitor:
relabelings:
- targetLabel: cluster
replacement: casa
additionalLabels:
app.kubernetes.io/name: prometheus-stack-apiserver # !important: selector used by agent

View File

@@ -0,0 +1,19 @@
# Deploy node exporter as a daemonset to all nodes
nodeExporter:
enabled: true
# job node exporter
prometheus-node-exporter:
prometheus:
monitor:
enabled: true
relabelings:
# https://github.com/dotdc/grafana-dashboards-kubernetes
- action: replace
sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: nodename
# identification of cluster
- targetLabel: cluster
replacement: casa

View File

@@ -0,0 +1,83 @@
prometheusOperator:
kubeletService:
enabled: true
# requires manual creation of service #prom-kublet-service
# File used for testing new options and configurations
# Should be the last file to be loaded
kubelet:
enabled: true
namespace: kube-system
serviceMonitor:
interval: 30s #WARN: Error on ingesting out-of-order samples. https://github.com/prometheus-community/helm-charts/issues/5483
enabled: true
## Enable scraping /metrics from kubelet's service
kubelet: true
additionalLabels:
app.kubernetes.io/name: prometheus-kubelet # !important: selector used by agent
probesMetricRelabelings:
- targetLabel: cluster
replacement: casa
- sourceLabels: [__name__, image]
separator: ;
regex: container_([a-z_]+);
replacement: $1
action: drop
- sourceLabels: [__name__]
separator: ;
regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
replacement: $1
action: drop
# # RelabelConfigs to apply to samples before scraping
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
# #
# # metrics_path is required to match upstream rules and charts
cAdvisorRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: ^(.*)$
targetLabel: nodename
replacement: $1
action: replace
# # RelabelConfigs to apply to samples before scraping
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
# #
probesRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: ^(.*)$
targetLabel: nodename
replacement: $1
action: replace
resourceRelabelings:
- targetLabel: cluster
replacement: casa
- action: replace
sourceLabels: [__metrics_path__]
targetLabel: metrics_path

View File

@@ -0,0 +1 @@
# use for testing

File diff suppressed because it is too large

View File

@@ -0,0 +1,69 @@
apiVersion: monitoring.coreos.com/v1alpha1
kind: PrometheusAgent
metadata:
name: prometheus-agent
namespace: monitoring
spec:
serviceMonitorNamespaceSelector: {}
podMonitorNamespaceSelector: {}
serviceMonitorSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: Exists
podMonitorSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: Exists
replicas: 1
remoteWrite:
- url: https://prometheus.monitoring.limbosolutions.com/api/v1/write
scrapeInterval: 60s
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 200m
memory: 180Mi
serviceAccountName: prometheus-agent
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-agent
rules:
- apiGroups: [""]
resources: ["nodes", "nodes/metrics", "nodes/proxy", "services", "endpoints", "pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["monitoring.coreos.com"]
resources: ["servicemonitors", "podmonitors"]
verbs: ["get", "list", "watch"]
- nonResourceURLs:
- /metrics
- /metrics/cadvisor
- /metrics/probes
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus-agent-monitoring
roleRef:
kind: ClusterRole
name: prometheus-agent
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: prometheus-agent
namespace: monitoring
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-agent
namespace: monitoring

1
monitoring/promtail/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
**.local.**

View File

@@ -0,0 +1,12 @@
# Promtail
``` bash
kubectl get namespace monitoring || kubectl create namespace monitoring
# add repo
helm repo add grafana https://grafana.github.io/helm-charts
# Install & Upgrade
helm upgrade --install promtail grafana/promtail --namespace monitoring \
--values=./values.yaml \
--values=./values.local.yaml
```
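`values.local.yaml` is git-ignored (see the `.gitignore` above) and only needs the Loki endpoint; a minimal sketch, the URL is a placeholder:

```bash
cat <<EOF > ./values.local.yaml
config:
  clients:
    - url: https://loki.example.lan/loki/api/v1/push
EOF
```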

View File

@@ -0,0 +1,46 @@
config:
clients:
- url: "????" #replaced values.local.yaml. Example: https://lokiserver/loki/api/v1/push
# by default all scrap configs had node_name
snippets:
extraScrapeConfigs: |
#scrape config for syslog
- job_name: host-journald
journal:
json: true
max_age: 24h
path: /var/log/host/journal
labels:
job: journald
relabel_configs:
- source_labels: ['__journal__systemd_unit']
target_label: 'journal_systemd_unit'
- source_labels: ['__journal_syslog_identifier']
target_label: 'journal_syslog_identifier'
- source_labels: ['__journal__hostname']
target_label: 'journal_hostname'
- target_label: 'host'
replacement: '${HOSTNAME}'
- target_label: 'cluster'
replacement: 'casa'
extraArgs:
- --config.expand-env=true
extraVolumes:
- name: node-logs
hostPath:
path: /var/log
extraVolumeMounts:
- name: node-logs
mountPath: /var/log/host
readOnly: true
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 50Mi

View File

@@ -1,140 +0,0 @@
services:
act-runner:
container_name: myLimbo-casa-gitea-act-runner
image: docker.io/gitea/act_runner:latest
restart: always
volumes:
- myLimbo-casa-gitea-act-runner-data:/data
- myLimbo-casa-gitea-act-runner-config:/config
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=${GITEA_INSTANCE_URL}
# When using Docker Secrets, it's also possible to use
# GITEA_RUNNER_REGISTRATION_TOKEN_FILE to pass the location.
# The env var takes precedence.
# Needed only for the first start.
- CONFIG_FILE=/config/config.yaml
- GITEA_RUNNER_REGISTRATION_TOKEN=${GITEA_RUNNER_REGISTRATION_TOKEN}
- GITEA_RUNNER_NAME=myLimbo-casa-gitea-act-runner
#- GITEA_RUNNER_CONFIG_FILE="/config/config.yaml"
configs:
- source: act-runner-config
target: /config/config.yaml
mode: 0444
volumes:
myLimbo-casa-gitea-act-runner-data:
name: myLimbo-casa-gitea-act-runner-data
myLimbo-casa-gitea-act-runner-config:
name: myLimbo-casa-gitea-act-runner-config
configs:
act-runner-config:
content: |
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# The timeout for the runner to wait for running jobs to finish when shutting down.
# Any running jobs that haven't finished after this timeout will be cancelled.
shutdown_timeout: 0s
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "myLimbo-casa-gitea-act-runner:docker://gitea/runner-images:ubuntu-latest"
#- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
#- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
#- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

View File

@@ -1,41 +0,0 @@
configs:
run-sh:
content: |
#!/bin/sh
# patch permissions so kubectl on sync-certs-job can write to the mounted volume
chown -R 1001:1001 /etc/ssl/certs/casa-limbosolutions-com-certs
while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g "daemon off;"
services:
nginx:
build:
context: ./docker
pull_policy: build
image: homesrv/nginx:latest
volumes:
- casa-limbosolutions-com-certs:/etc/ssl/certs/casa-limbosolutions-com-certs
ports:
- 443:443
- 80:80
networks:
- public
restart: unless-stopped
command: /bin/sh -c '/run.sh'
configs:
- source: run-sh
target: /run.sh
mode: 0755
volumes:
nginx-conf.d:
casa-limbosolutions-com-certs:
name: casa-limbosolutions-com-certs
external: false
networks:
public:
name: reverseproxy_public
external: true

View File

@@ -1,4 +0,0 @@
FROM nginx:latest
COPY nginx.conf.d/* /etc/nginx/conf.d

View File

@@ -1,35 +0,0 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80;
server_name casa.limbosolutions.com *.casa.limbosolutions.com has.lan;
return 301 https://has.casa.limbosolutions.com$request_uri;
}
server {
listen 443 ssl;
ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
proxy_pass http://homeassistant-app:80;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}

View File

@@ -1,32 +0,0 @@
server {
server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com;
listen 443 ssl;
ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
set $upstream lms-lms-1;
#docker default resolver
resolver 127.0.0.11 ipv6=off;
proxy_pass http://$upstream:9002;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Url-Scheme $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
server {
listen 80;
server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com lms.lan music.lan;
return 301 https://lms.casa.limbosolutions.com$request_uri;
}

View File

@@ -1,22 +0,0 @@
server {
listen 80;
proxy_buffering off;
server_name zigbee2mqtt.lan;
location / {
proxy_pass http://zigbee2mqtt:8080/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Url-Scheme $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}

View File

@@ -0,0 +1,65 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zigbee2mqtt-data
namespace: mqtt
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-path
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zigbee2mqtt
namespace: mqtt
labels:
app: zigbee2mqtt
spec:
replicas: 1
selector:
matchLabels:
app: zigbee2mqtt
template:
metadata:
labels:
app: zigbee2mqtt
spec:
containers:
- name: zigbee2mqtt
securityContext:
privileged: true
image: koenkk/zigbee2mqtt
volumeMounts:
- name: zigbee2mqtt-data
mountPath: /app/data
- name: usb-device
mountPath: /dev/ttyUSB0
- name: run-udev
mountPath: /run/udev
resources:
requests:
memory: "50Mi"
cpu: "100m"
limits:
memory: "100Mi"
cpu: "200m"
volumes:
- name: usb-device
hostPath:
path: /dev/ttyUSB0
type: CharDevice
- name: run-udev
hostPath:
path: /run/udev
- name: zigbee2mqtt-data
persistentVolumeClaim:
claimName: zigbee2mqtt-data

View File

@@ -1,28 +0,0 @@
version: '3.8'
services:
zigbee2mqtt:
container_name: zigbee2mqtt
image: koenkk/zigbee2mqtt
restart: unless-stopped
volumes:
- data:/app/data
- /run/udev:/run/udev:ro
#ports:
# Frontend port
#- 8085:8080
environment:
- TZ=Europe/Lisbon
devices:
# Make sure this matches your adapter location
- /dev/ttyUSB0:/dev/ttyUSB0
networks:
zigbee2mqtt:
reverseproxy_public:
volumes:
data:
networks:
zigbee2mqtt:
reverseproxy_public:
external: true