Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 064baa08a4 | |||
| a0ff8a03b5 | |||
| 6030e44a99 |
109
.devcontainer/.zshrc
Normal file
109
.devcontainer/.zshrc
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# If you come from bash you might have to change your $PATH.
|
||||||
|
# export PATH=$HOME/bin:$HOME/.local/bin:/usr/local/bin:$PATH
|
||||||
|
|
||||||
|
# Path to your Oh My Zsh installation.
|
||||||
|
export ZSH="$HOME/.oh-my-zsh"
|
||||||
|
|
||||||
|
# Set name of the theme to load --- if set to "random", it will
|
||||||
|
# load a random theme each time Oh My Zsh is loaded, in which case,
|
||||||
|
# to know which specific one was loaded, run: echo $RANDOM_THEME
|
||||||
|
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
|
||||||
|
ZSH_THEME="gnzh"
|
||||||
|
|
||||||
|
# Set list of themes to pick from when loading at random
|
||||||
|
# Setting this variable when ZSH_THEME="devcontainers"
|
||||||
|
# a theme from this variable instead of looking in $ZSH/themes/
|
||||||
|
# If set to an empty array, this variable will have no effect.
|
||||||
|
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
|
||||||
|
|
||||||
|
# Uncomment the following line to use case-sensitive completion.
|
||||||
|
# CASE_SENSITIVE="true"
|
||||||
|
|
||||||
|
# Uncomment the following line to use hyphen-insensitive completion.
|
||||||
|
# Case-sensitive completion must be off. _ and - will be interchangeable.
|
||||||
|
# HYPHEN_INSENSITIVE="true"
|
||||||
|
|
||||||
|
# Uncomment one of the following lines to change the auto-update behavior
|
||||||
|
# zstyle ':omz:update' mode disabled # disable automatic updates
|
||||||
|
# zstyle ':omz:update' mode auto # update automatically without asking
|
||||||
|
# zstyle ':omz:update' mode reminder # just remind me to update when it's time
|
||||||
|
|
||||||
|
# Uncomment the following line to change how often to auto-update (in days).
|
||||||
|
# zstyle ':omz:update' frequency 13
|
||||||
|
|
||||||
|
# Uncomment the following line if pasting URLs and other text is messed up.
|
||||||
|
# DISABLE_MAGIC_FUNCTIONS="true"
|
||||||
|
|
||||||
|
# Uncomment the following line to disable colors in ls.
|
||||||
|
# DISABLE_LS_COLORS="true"
|
||||||
|
|
||||||
|
# Uncomment the following line to disable auto-setting terminal title.
|
||||||
|
# DISABLE_AUTO_TITLE="true"
|
||||||
|
|
||||||
|
# Uncomment the following line to enable command auto-correction.
|
||||||
|
# ENABLE_CORRECTION="true"
|
||||||
|
|
||||||
|
# Uncomment the following line to display red dots whilst waiting for completion.
|
||||||
|
# You can also set it to another string to have that shown instead of the default red dots.
|
||||||
|
# e.g. COMPLETION_WAITING_DOTS="%F{yellow}waiting...%f"
|
||||||
|
# Caution: this setting can cause issues with multiline prompts in zsh < 5.7.1 (see #5765)
|
||||||
|
# COMPLETION_WAITING_DOTS="true"
|
||||||
|
|
||||||
|
# Uncomment the following line if you want to disable marking untracked files
|
||||||
|
# under VCS as dirty. This makes repository status check for large repositories
|
||||||
|
# much, much faster.
|
||||||
|
# DISABLE_UNTRACKED_FILES_DIRTY="true"
|
||||||
|
|
||||||
|
# Uncomment the following line if you want to change the command execution time
|
||||||
|
# stamp shown in the history command output.
|
||||||
|
# You can set one of the optional three formats:
|
||||||
|
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
|
||||||
|
# or set a custom format using the strftime function format specifications,
|
||||||
|
# see 'man strftime' for details.
|
||||||
|
# HIST_STAMPS="mm/dd/yyyy"
|
||||||
|
|
||||||
|
# Would you like to use another custom folder than $ZSH/custom?
|
||||||
|
# ZSH_CUSTOM=/path/to/new-custom-folder
|
||||||
|
|
||||||
|
# Which plugins would you like to load?
|
||||||
|
# Standard plugins can be found in $ZSH/plugins/
|
||||||
|
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
|
||||||
|
# Example format: plugins=(rails git textmate ruby lighthouse)
|
||||||
|
# Add wisely, as too many plugins slow down shell startup.
|
||||||
|
plugins=(git timer web-search ssh kubectl helm colorize zsh-autosuggestions rclone ansible)
|
||||||
|
ZSH_COLORIZE_TOOL=chroma
|
||||||
|
|
||||||
|
source $ZSH/oh-my-zsh.sh
|
||||||
|
|
||||||
|
# User configuration
|
||||||
|
|
||||||
|
# export MANPATH="/usr/local/man:$MANPATH"
|
||||||
|
|
||||||
|
# You may need to manually set your language environment
|
||||||
|
# export LANG=en_US.UTF-8
|
||||||
|
|
||||||
|
# Preferred editor for local and remote sessions
|
||||||
|
# if [[ -n $SSH_CONNECTION ]]; then
|
||||||
|
# export EDITOR='vim'
|
||||||
|
# else
|
||||||
|
# export EDITOR='nvim'
|
||||||
|
# fi
|
||||||
|
|
||||||
|
# Compilation flags
|
||||||
|
# export ARCHFLAGS="-arch $(uname -m)"
|
||||||
|
|
||||||
|
# Set personal aliases, overriding those provided by Oh My Zsh libs,
|
||||||
|
# plugins, and themes. Aliases can be placed here, though Oh My Zsh
|
||||||
|
# users are encouraged to define aliases within a top-level file in
|
||||||
|
# the $ZSH_CUSTOM folder, with .zsh extension. Examples:
|
||||||
|
# - $ZSH_CUSTOM/aliases.zsh
|
||||||
|
# - $ZSH_CUSTOM/macos.zsh
|
||||||
|
# For a full list of active aliases, run `alias`.
|
||||||
|
#
|
||||||
|
# Example aliases
|
||||||
|
# alias zshconfig="mate ~/.zshrc"
|
||||||
|
# alias ohmyzsh="mate ~/.oh-my-zsh"
|
||||||
|
DISABLE_AUTO_UPDATE=true
|
||||||
|
DISABLE_UPDATE_PROMPT=true
|
||||||
|
|
||||||
|
export PATH=$PATH:/home/vscode/lib
|
||||||
@@ -1,20 +1,32 @@
|
|||||||
|
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
|
||||||
|
// README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu
|
||||||
{
|
{
|
||||||
"name": "casa-dev",
|
"name": "casa-dev",
|
||||||
"image": "git.limbosolutions.com/mylimbo/devcontainers/devops:latest",
|
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
|
||||||
"remoteUser": "vscode",
|
"image": "mcr.microsoft.com/devcontainers/base:jammy",
|
||||||
"runArgs": [
|
"features": {
|
||||||
"--hostname=casa-dev"
|
"ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {},
|
||||||
],
|
"ghcr.io/devcontainers-extra/features/ansible": {},
|
||||||
|
"ghcr.io/devcontainers/features/docker-outside-of-docker": {},
|
||||||
|
},
|
||||||
|
// "features": {},
|
||||||
|
|
||||||
|
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||||
|
// "forwardPorts": [],
|
||||||
|
// Configure tool-specific properties.
|
||||||
|
// "customizations": {},
|
||||||
|
|
||||||
"mounts": [
|
"mounts": [
|
||||||
|
"source=${localWorkspaceFolder}/.devcontainer/.zshrc,target=/home/vscode/.zshrc,type=bind",
|
||||||
"source=${localWorkspaceFolder}/../lms,target=/workspaces/lms,type=bind",
|
"source=${localWorkspaceFolder}/../lms,target=/workspaces/lms,type=bind",
|
||||||
"source=${localWorkspaceFolder}/../homeAssistant,target=/workspaces/homeAssistant,type=bind",
|
"source=${localWorkspaceFolder}/../homeAssistant,target=/workspaces/homeAssistant,type=bind",
|
||||||
"source=${localWorkspaceFolder}/../mosquitto,target=/workspaces/mosquitto,type=bind",
|
"source=${localWorkspaceFolder}/../mosquitto,target=/workspaces/mosquitto,type=bind",
|
||||||
"source=${localWorkspaceFolder}/../kb,target=/workspaces/kb,type=bind",
|
"source=${localWorkspaceFolder}/../kb,target=/workspaces/kb,type=bind",
|
||||||
"source=${localWorkspaceFolder}/../pi.bluetooth.speaker,target=/workspaces/pi.bluetooth.speaker,type=bind",
|
|
||||||
"source=${localWorkspaceFolder}/.env.d/kube,target=/home/vscode/.kube,type=bind",
|
"source=${localWorkspaceFolder}/.env.d/kube,target=/home/vscode/.kube,type=bind",
|
||||||
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,consistency=cached",
|
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,consistency=cached",
|
||||||
"source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached"
|
"source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached"
|
||||||
],
|
],
|
||||||
|
"postCreateCommand": "bash .devcontainer/scripts/postCreate.sh",
|
||||||
"customizations": {
|
"customizations": {
|
||||||
"vscode": {
|
"vscode": {
|
||||||
"extensions": [
|
"extensions": [
|
||||||
@@ -25,13 +37,11 @@
|
|||||||
"yzhang.markdown-all-in-one",
|
"yzhang.markdown-all-in-one",
|
||||||
"davidanson.vscode-markdownlint",
|
"davidanson.vscode-markdownlint",
|
||||||
"eamodio.gitlens",
|
"eamodio.gitlens",
|
||||||
"m4ns0ur.base64",
|
"m4ns0ur.base64"
|
||||||
"rogalmic.bash-debug",
|
],
|
||||||
"streetsidesoftware.code-spell-checker",
|
"settings": {
|
||||||
"ms-azuretools.vscode-containers",
|
"terminal.integrated.defaultProfile.linux": "zsh"
|
||||||
"sanjulaganepola.github-local-actions",
|
}
|
||||||
"eamodio.gitlens"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
7
.devcontainer/scripts/postCreate.sh
Normal file
7
.devcontainer/scripts/postCreate.sh
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
#setup chroma for zsh colorize
|
||||||
|
chmod +x /home/vscode/lib/chroma
|
||||||
|
curl https://rclone.org/install.sh | sudo bash
|
||||||
|
docker context create casa-prod --description "casa prod context" --docker host=ssh://admin@homesrv01.dev.lan
|
||||||
|
docker context use casa-prod
|
||||||
|
|
||||||
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
name: Deploy Casa services CI/CD Pipeline
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- fix/*
|
|
||||||
- main
|
|
||||||
- master
|
|
||||||
paths:
|
|
||||||
- 'services/**'
|
|
||||||
- '.gitea/workflows/casa-services**'
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'services/**'
|
|
||||||
- '.gitea/workflows/casa-services**'
|
|
||||||
schedule:
|
|
||||||
- cron: '0 15 * * 0' # every sunday 3 pm
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy:
|
|
||||||
runs-on: casa-vlan-cicd
|
|
||||||
env:
|
|
||||||
GITHUB_TEMP: ${{ runner.temp }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Fetch limbo public actions
|
|
||||||
env:
|
|
||||||
RUNNER_TEMP: "${{ runner.temp }}"
|
|
||||||
WORKSPACE: "${{ gitea.workspace }}"
|
|
||||||
run: |
|
|
||||||
curl -fsSL https://git.limbosolutions.com/kb/gitea/raw/branch/main/cloud-scripts/setup-limbo-actions.sh | bash 2>&1
|
|
||||||
|
|
||||||
- name: Setup kubectl
|
|
||||||
uses: ./.gitea/limbo_actions/kubectl-setup
|
|
||||||
with:
|
|
||||||
kube_server: ${{ secrets.CASA_VLAN_KUBE_SERVER }}
|
|
||||||
kube_ca_base64: ${{ secrets.CASA_VLAN_KUBE_CA_BASE64 }}
|
|
||||||
kube_token: ${{ secrets.CASA_VLAN_KUBE_TOKEN }}
|
|
||||||
|
|
||||||
- name: Deploy zigbee2mqtt
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl apply -f ./services/zigbee2mqtt/deploy/zigbee2mqtt-deploy.yaml
|
|
||||||
|
|
||||||
- name: Deploy wyoming
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
kubectl apply -f ./services/wyoming/deploy/wyoming-deploy.yaml
|
|
||||||
|
|
||||||
# - name: Deploy node-red
|
|
||||||
# shell: bash
|
|
||||||
# run: |
|
|
||||||
# ./services/node-red/ops-scripts/apply-app.sh
|
|
||||||
21
.gitea/workflows/deploy-sync-certs-job.yml
Normal file
21
.gitea/workflows/deploy-sync-certs-job.yml
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 16 * * *' # Every day at 16:00
|
||||||
|
jobs:
|
||||||
|
deploy-to-homesrv01:
|
||||||
|
runs-on: "myLimbo-casa-gitea-act-runner"
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
# all certs and key are base64 encoded
|
||||||
|
- name: docker compose up
|
||||||
|
env:
|
||||||
|
KUBERNETES_SERVER: ${{ secrets.KUBERNETES_SERVER }}
|
||||||
|
KUBERNETES_CLIENT_CRT_DATA: ${{ secrets.KUBERNETES_CLIENT_CRT_DATA }}
|
||||||
|
KUBERNETES_CLIENT_KEY_DATA: ${{ secrets.KUBERNETES_CLIENT_KEY_DATA }}
|
||||||
|
KUBERNETES_CRT_AUTHORITY_DATA: ${{ secrets.KUBERNETES_CRT_AUTHORITY_DATA }}
|
||||||
|
run: |
|
||||||
|
docker compose -f ./casa-limbosolutions-com/sync-certs-job/docker-compose.yaml up -d --pull always
|
||||||
|
|
||||||
13
.gitea/workflows/deploy-wyoming.yml
Normal file
13
.gitea/workflows/deploy-wyoming.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 5 * * SUN' # Every Sunday at 05:00
|
||||||
|
jobs:
|
||||||
|
deploy-to-homesrv01:
|
||||||
|
runs-on: "myLimbo-casa-gitea-act-runner"
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: docker compose up
|
||||||
|
run: docker compose -f ./services/wyoming/docker-compose.yaml up -d --pull always
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
name: Monitoring services CI/CD Pipeline
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- fix/*
|
|
||||||
- main
|
|
||||||
- master
|
|
||||||
paths:
|
|
||||||
- 'monitoring/**'
|
|
||||||
- '.gitea/workflows/monitoring**'
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'monitoring/**'
|
|
||||||
- '.gitea/workflows/monitoring**'
|
|
||||||
schedule:
|
|
||||||
- cron: '0 12 * * 0' # every sunday 12 am
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy:
|
|
||||||
runs-on: casa-vlan-cicd
|
|
||||||
env:
|
|
||||||
GITHUB_TEMP: ${{ runner.temp }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Fetch limbo public actions
|
|
||||||
env:
|
|
||||||
RUNNER_TEMP: "${{ runner.temp }}"
|
|
||||||
WORKSPACE: "${{ gitea.workspace }}"
|
|
||||||
run: |
|
|
||||||
curl -fsSL https://git.limbosolutions.com/kb/gitea/raw/branch/main/cloud-scripts/setup-limbo-actions.sh | bash 2>&1
|
|
||||||
|
|
||||||
- name: Setup kubectl
|
|
||||||
uses: ./.gitea/limbo_actions/kubectl-setup
|
|
||||||
with:
|
|
||||||
kube_server: ${{ secrets.CASA_VLAN_KUBE_SERVER }}
|
|
||||||
kube_ca_base64: ${{ secrets.CASA_VLAN_KUBE_CA_BASE64 }}
|
|
||||||
kube_token: ${{ secrets.CASA_VLAN_KUBE_TOKEN }}
|
|
||||||
|
|
||||||
# secrets.LOKI_URL = https://<LOKISERVER>/loki/api/v1/push
|
|
||||||
- name: Deploy promtail
|
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
LOKI_URL: "${{ secrets.LOKI_URL }}"
|
|
||||||
run: |
|
|
||||||
# add repo
|
|
||||||
helm repo add grafana https://grafana.github.io/helm-charts
|
|
||||||
# Install & Upgrade
|
|
||||||
helm upgrade --install promtail grafana/promtail --namespace monitoring \
|
|
||||||
--values=./monitoring/promtail/values.yaml --set config.clients[0].url=${LOKI_URL}
|
|
||||||
|
|
||||||
# - name: Deploy Telegraf
|
|
||||||
# shell: bash
|
|
||||||
# run: |
|
|
||||||
# # add repo
|
|
||||||
# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
|
||||||
# # Install & Upgrade
|
|
||||||
# helm upgrade --install prometheus-stack prometheus-community/kube-prometheus-stack \
|
|
||||||
# --namespace monitoring \
|
|
||||||
# --values=./monitoring/prometheus/deploy/helm/01-only-crd-and-operator.yaml \
|
|
||||||
# --values=./monitoring/prometheus/deploy//helm/02-kube-metrics.yaml \
|
|
||||||
# --values=./monitoring/prometheus/deploy/helm/03-node-exporter.yaml \
|
|
||||||
# --values=./monitoring/prometheus/deploy/helm/04-kubelet.yaml
|
|
||||||
# kubectl apply -f ./monitoring/prometheus/deploy/prometheus-agent.yaml
|
|
||||||
|
|
||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -11,4 +11,3 @@ gitignore/*
|
|||||||
.secrets
|
.secrets
|
||||||
ansible/inventory.yml
|
ansible/inventory.yml
|
||||||
.env.d/*
|
.env.d/*
|
||||||
.tmp/**
|
|
||||||
|
|||||||
7
.vscode/casa.full-stack.code-workspace
vendored
Normal file
7
.vscode/casa.full-stack.code-workspace
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"folders": [
|
||||||
|
{
|
||||||
|
"path": "../../homeAssistant"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
230
README.md
230
README.md
@@ -1,38 +1,89 @@
|
|||||||
# casa
|
# casa
|
||||||
|
|
||||||
Welcome to my casa repository, where Home Assistant and other services are hosted.
|
Welcome to my home server repository, where Home Assistant and other services are hosted.
|
||||||
This repository is dedicated to documenting and maintaining the server configuration and workflows.
|
This repository is dedicated to documenting and maintaining the server configuration and workflows.
|
||||||
|
|
||||||
The primary goal is to establish an k3s cluster capable of isolating services and communications related to home automation at the network level.
|
The primary goal is to establish a Docker-based server capable of isolating services and communications related to home automation at the network level.
|
||||||
The server operates within its own VLAN (`casa`) but requires controlled communication with other VLANs, such as `IOT Vlan`
|
The server operates within its own VLAN (`homesrv`) but requires controlled communication with other VLANs, such as `IOT Vlan`
|
||||||
|
|
||||||
For more information about k3s cluster/nodes setup check [readme](./docs/k3s-cluster.md).
|
<!-- omit in toc -->
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
**Table of Contents:**
|
- [Services](#services)
|
||||||
|
- [myInfra stack](#myinfra-stack)
|
||||||
|
- [docker](#docker)
|
||||||
|
- [rclone plugin](#rclone-plugin)
|
||||||
|
- [nginx](#nginx)
|
||||||
|
- [Home Assistant](#home-assistant)
|
||||||
|
- [Lyrion Music Server (LMS)](#lyrion-music-server-lms)
|
||||||
|
- [Mosquitto](#mosquitto)
|
||||||
|
- [Wyoming](#wyoming)
|
||||||
|
- [Zigbee2mqtt](#zigbee2mqtt)
|
||||||
|
- [Host](#host)
|
||||||
|
- [Proxmox - container](#proxmox---container)
|
||||||
|
- [OS](#os)
|
||||||
|
- [logs](#logs)
|
||||||
|
- [Development, Maintenance and Deployment](#development-maintenance-and-deployment)
|
||||||
|
- [Docker context](#docker-context)
|
||||||
|
|
||||||
- [Home Assistant](#home-assistant)
|
## Services
|
||||||
- [Lyrion Music Server (LMS)](#lyrion-music-server-lms)
|
|
||||||
- [Mosquitto](#mosquitto)
|
|
||||||
- [Wyoming](#wyoming)
|
|
||||||
- [Zigbee2mqtt](#zigbee2mqtt)
|
|
||||||
- [node-red](#node-red)
|
|
||||||
- [core-dns](#core-dns)
|
|
||||||
|
|
||||||
## Home Assistant
|
### myInfra stack
|
||||||
|
|
||||||
|
docker, promtail and telegraf configuration [maintained on myInfra repo](/:root/marcio.fernandes/myInfrastructure).
|
||||||
|
|
||||||
|
### docker
|
||||||
|
|
||||||
|
#### rclone plugin
|
||||||
|
|
||||||
|
[https://rclone.org/docker/](https://rclone.org/docker/)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# execute on server
|
||||||
|
sudo apt-get -y install fuse
|
||||||
|
docker plugin install rclone/docker-volume-rclone:amd64 args="-v" --alias rclone --grant-all-permissions
|
||||||
|
docker plugin list
|
||||||
|
```
|
||||||
|
|
||||||
|
if error when enabling plugin.
|
||||||
|
*"rclone.sock: connect: no such file or directory"*
|
||||||
|
remove existing cache.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
rm -r /var/lib/docker-plugins/rclone/cache
|
||||||
|
mkdir -p /var/lib/docker-plugins/rclone/cache
|
||||||
|
```
|
||||||
|
|
||||||
|
[ansible role for plugin configuration](./rclone.docker-plugin.playbook.yaml)
|
||||||
|
|
||||||
|
### nginx
|
||||||
|
|
||||||
|
[Docker Compose](./services/nginx/docker-compose.yaml)
|
||||||
|
|
||||||
|
All sites configurations set during docker build.
|
||||||
|
|
||||||
|
### Home Assistant
|
||||||
|
|
||||||
[Git Repo](/:root/marcio.fernandes/homeAssistant)
|
[Git Repo](/:root/marcio.fernandes/homeAssistant)
|
||||||
|
|
||||||
## Lyrion Music Server (LMS)
|
### Lyrion Music Server (LMS)
|
||||||
|
|
||||||
For instructions on setting up the Lyrion Music Server on kubernetes, refer to the [LMS Git Repository](/:root/marcio.fernandes/lms).
|
For instructions on setting up the Lyrion Music Server Docker container, refer to the [LMS Git Repository](/:root/marcio.fernandes/lms).
|
||||||
|
|
||||||
For information on integrating Lyrion Music Server with Home Assistant, visit the [Home Assistant Git Repository](/:root/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server).
|
For information on integrating Lyrion Music Server with Home Assistant, visit the [Home Assistant Git Repository](/:root/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server).
|
||||||
|
|
||||||
## Mosquitto
|
Using [Docker Rclone plugin](https://rclone.org/docker/) for accessing the bucket where music is stored. Configuration is managed via [Ansible playbook](./rclone.docker-plugin.playbook.yml).
|
||||||
|
|
||||||
|
```sh
|
||||||
|
#configure access to s3 bucket
|
||||||
|
ansible-playbook ./rclone.docker-plugin.playbook.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Mosquitto
|
||||||
|
|
||||||
[Git Repo](/:root/marcio.fernandes/mosquitto)
|
[Git Repo](/:root/marcio.fernandes/mosquitto)
|
||||||
|
|
||||||
## Wyoming
|
### Wyoming
|
||||||
|
|
||||||
A peer-to-peer protocol for voice assistants (basically JSONL + PCM audio)
|
A peer-to-peer protocol for voice assistants (basically JSONL + PCM audio)
|
||||||
|
|
||||||
@@ -48,9 +99,11 @@ This is an open standard of the Open Home Foundation.
|
|||||||
|
|
||||||
For more information about home assistant integration [check home assistant repo](/:root/marcio.fernandes/homeassistant#wyoming).
|
For more information about home assistant integration [check home assistant repo](/:root/marcio.fernandes/homeassistant#wyoming).
|
||||||
|
|
||||||
Deployments and scripts on services/wyoming repo folder.
|
[Docker compose file](./services/wyoming/docker-compose.yaml).
|
||||||
|
|
||||||
[Continuous deploy](./.gitea/workflows/casa-services-deploy.yaml).
|
Continuous deploy [gitea action](.gitea/workflows/deploy-wyoming.yml).
|
||||||
|
|
||||||
|
Because of performance wyoming whisper is currently hosted on chimera kubernetes cluster [deployment](./services/wyoming/whisper.kubernetes-deployment.yaml)
|
||||||
|
|
||||||
Links:
|
Links:
|
||||||
|
|
||||||
@@ -59,41 +112,118 @@ Links:
|
|||||||
- [https://exitcode0.net/posts/wyoming-whisper-docker-compose/](https://exitcode0.net/posts/wyoming-whisper-docker-compose/)
|
- [https://exitcode0.net/posts/wyoming-whisper-docker-compose/](https://exitcode0.net/posts/wyoming-whisper-docker-compose/)
|
||||||
- [https://exitcode0.net/posts/wyoming-piper-docker-compose/](https://exitcode0.net/posts/wyoming-piper-docker-compose/)
|
- [https://exitcode0.net/posts/wyoming-piper-docker-compose/](https://exitcode0.net/posts/wyoming-piper-docker-compose/)
|
||||||
|
|
||||||
## Zigbee2mqtt
|
### Zigbee2mqtt
|
||||||
|
|
||||||
Zigbee to MQTT bridge, get rid of your proprietary Zigbee bridges
|
Zigbee to MQTT bridge, get rid of your proprietary Zigbee bridges
|
||||||
|
|
||||||
Attached SONOFF Universal Zigbee 3.0 USB Dongle Plus to the Proxmox node and configure USB passthrough so the VM can use it.
|
SONOFF Universal Zigbee 3.0 USB Dongle Plus attached on [proxmox host](#proxmox---lxc-container).
|
||||||
|
|
||||||
Deployments and scripts on services/Zigbee2mqtt repo folder.
|
Patch security on [proxmox host](#proxmox---lxc-container).
|
||||||
|
(usb passthrough to [lxc container](#proxmox---lxc-container))
|
||||||
[Continuous deploy](./.gitea/workflows/casa-services-deploy.yaml).
|
|
||||||
|
|
||||||
## node-red
|
|
||||||
|
|
||||||
check [readme](./services/node-red/README.md) for more information on setup and configuration.
|
|
||||||
|
|
||||||
## core-dns
|
|
||||||
|
|
||||||
Remove warning from logs.
|
|
||||||
|
|
||||||
```log
|
|
||||||
[WARNING] No files matching import glob pattern: /etc/coredns/custom/*.server
|
|
||||||
[WARNING] No files matching import glob pattern: /etc/coredns/custom/*.override
|
|
||||||
```
|
|
||||||
|
|
||||||
1. Apply on kubernetes
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: v1
|
#on proxmox hosting server
|
||||||
kind: ConfigMap
|
chown 100000:100020 /dev/ttyUSB0
|
||||||
metadata:
|
chown 100000:100020 /dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0
|
||||||
name: coredns-custom
|
```
|
||||||
namespace: kube-system
|
|
||||||
data:
|
[docker compose](./services/zigbee2mqtt/docker-compose.yaml)
|
||||||
log.override: |
|
|
||||||
#
|
Links
|
||||||
stub.server: |
|
|
||||||
#
|
- [https://www.zigbee2mqtt.io/](https://www.zigbee2mqtt.io/)
|
||||||
|
- [Home assistant integration](/:root/marcio.fernandes/homeassistant#Zigbee2mqtt)
|
||||||
|
- [Continuos Deploy - git action](./.gitea/workflows/services.zigbee2mqtt.yml)
|
||||||
|
|
||||||
|
## Host
|
||||||
|
|
||||||
|
### Proxmox - container
|
||||||
|
|
||||||
|
Currently hosted on a proxmox ubuntu container.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# cat /etc/pve/lxc/105.conf
|
||||||
|
arch: amd64
|
||||||
|
cmode: shell
|
||||||
|
cores: 2
|
||||||
|
features: fuse=1,keyctl=1,nesting=1
|
||||||
|
hostname: homesrv01
|
||||||
|
memory: 1500
|
||||||
|
net0: name=eth0,bridge=vmbr0,firewall=1,ip6=dhcp,...,type=veth
|
||||||
|
onboot: 1
|
||||||
|
ostype: ubuntu
|
||||||
|
protection: 1
|
||||||
|
rootfs: local-lvm:vm-105-disk-0,size=32G
|
||||||
|
swap: 1500
|
||||||
|
unprivileged: 1
|
||||||
|
lxc.cgroup2.devices.allow: c 189:* rwm
|
||||||
|
lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
|
||||||
|
lxc.cgroup2.devices.allow: c 188:* rwm
|
||||||
|
lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
|
||||||
|
```
|
||||||
|
|
||||||
|
lxc.cgroup2.devices.allow and lxc.mount.entry identification
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# identify usb pen
|
||||||
|
lsusb
|
||||||
|
|
||||||
|
# example
|
||||||
|
# Bus 001 Device 008: ID 10c4:ea60 Silicon Labs CP210x UART Bridge
|
||||||
|
|
||||||
|
|
||||||
|
#replace with bus and device id
|
||||||
|
ls -l /dev/bus/usb/001/008
|
||||||
|
|
||||||
|
#example result
|
||||||
|
# crw-rw-r-- 1 root root 189, 7 May 17 15:56 /dev/bus/usb/001/008
|
||||||
|
|
||||||
|
# so
|
||||||
|
|
||||||
|
#lxc.cgroup2.devices.allow: c 189:* rwm
|
||||||
|
#lxc.mount.entry: usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 none bind,optional,create=file
|
||||||
|
|
||||||
|
ls -l /dev/serial/by-id/
|
||||||
|
# example result
|
||||||
|
#lrwxrwxrwx 1 root root 13 May 17 15:56 usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0 -> ../../ttyUSB0
|
||||||
|
|
||||||
|
ls -l /dev/ttyUSB0
|
||||||
|
#example result
|
||||||
|
#crw-rw---- 1 root dialout 188, 0 May 17 15:56 /dev/ttyUSB0
|
||||||
|
|
||||||
|
#so
|
||||||
|
|
||||||
|
#lxc.cgroup2.devices.allow: c 188:* rwm
|
||||||
|
#lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
|
||||||
|
```
|
||||||
|
|
||||||
|
### OS
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# lsb_release -a
|
||||||
|
Distributor ID: Ubuntu
|
||||||
|
Description: Ubuntu 24.04 LTS
|
||||||
|
Release: 24.04
|
||||||
|
Codename: noble
|
||||||
|
|
||||||
|
# uname -r
|
||||||
|
6.8.4-3-pve
|
||||||
|
```
|
||||||
|
|
||||||
|
### logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# check auto update scripts logs
|
||||||
|
journalctl -r -t auto-update
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development, Maintenance and Deployment
|
||||||
|
|
||||||
|
Using visual studio code, docker, ansible and gitea actions.
|
||||||
|
|
||||||
|
### Docker context
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# create context to homesrv01 docker on development devices
|
||||||
|
docker context create homesrv01 --docker host=ssh://admin@homesrv01.dev.lan
|
||||||
```
|
```
|
||||||
|
|||||||
2
ansible.cfg
Normal file
2
ansible.cfg
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
[defaults]
|
||||||
|
inventory = ansible/inventory.yml
|
||||||
29
casa-limbosolutions-com/icarus/README.md
Normal file
29
casa-limbosolutions-com/icarus/README.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# casa.limbosolutions.com at icarus
|
||||||
|
|
||||||
|
Use icarus cluster context to all documentation and scrips on this folder. [Check Instructions](#icarus-cluster---access) for how to setup required user and roles on icurus and client kubeconfig.
|
||||||
|
|
||||||
|
## certificates (wildcard)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -f ./certs.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#check certificates
|
||||||
|
kubectl get cert -n casa-limbosolutions-com
|
||||||
|
```
|
||||||
|
|
||||||
|
## Icarus cluster - access
|
||||||
|
|
||||||
|
On user computer.
|
||||||
|
*Access to k3s context not required.*
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# create private key
|
||||||
|
openssl genrsa -out ../../.env.d/kube/casa@icarus-user.key 2048
|
||||||
|
|
||||||
|
# create csr
|
||||||
|
openssl req -new -key ../../.env.d/kube/casa@icarus-user.key -out ../../.env.d/kube/casa@icarus-user.csr -subj "/CN=casa/O=limbosolutions"
|
||||||
|
```
|
||||||
|
|
||||||
|
Follow instructions to [setup user and roles on icarus k3s cluster](./k3s-admin.md), and setup kubectl config [kube config](./k3s-kubctl-config.md).
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
# to run on icarus
|
|
||||||
|
|
||||||
apiVersion: cert-manager.io/v1
|
apiVersion: cert-manager.io/v1
|
||||||
kind: Certificate
|
kind: Certificate
|
||||||
95
casa-limbosolutions-com/icarus/k3s-admin.md
Normal file
95
casa-limbosolutions-com/icarus/k3s-admin.md
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
# casa on Icarus - admin
|
||||||
|
|
||||||
|
Requires kubernetes admin user access to icarus. All documentation and scripts must be executed on icarus context with an admin account.
|
||||||
|
|
||||||
|
Currently using an symbolic on icarus project on my dev device to this file.
|
||||||
|
|
||||||
|
## kubernetes Namespace
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# delete namespace
|
||||||
|
kubectl create namespace casa-limbosolutions-com
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# delete namespace
|
||||||
|
kubectl delete namespace casa-limbosolutions-com
|
||||||
|
```
|
||||||
|
|
||||||
|
## Roles and Bindings
|
||||||
|
|
||||||
|
``` yaml
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
namespace: casa-limbosolutions-com
|
||||||
|
name: casa-limbosolutions-com
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
- cert-manager.io # to access deployments certs from cert-manager
|
||||||
|
- apps # to access deployments
|
||||||
|
- networking.k8s.io # to access ingresses
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
- services
|
||||||
|
- secrets
|
||||||
|
- certificates
|
||||||
|
- deployments
|
||||||
|
- configmaps
|
||||||
|
- ingresses
|
||||||
|
- persistentvolumeclaims
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- create
|
||||||
|
- update
|
||||||
|
- patch
|
||||||
|
- delete
|
||||||
|
- deletecollection
|
||||||
|
```
|
||||||
|
|
||||||
|
``` yaml
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
name: casa-limbosolutions-com-rb # Replace with your role binding name
|
||||||
|
namespace: casa-limbosolutions-com # Replace with your namespace
|
||||||
|
subjects:
|
||||||
|
- kind: User # or "ServiceAccount" for service accounts
|
||||||
|
name: casa # Replace with the username or service account name
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
namespace: casa-limbosolutions-com
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: casa-limbosolutions-com # The name of the role you created
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
```
|
||||||
|
|
||||||
|
### kubernetes User
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#Deploy csr to k3s
|
||||||
|
cat <<EOF | kubectl apply -f -
|
||||||
|
apiVersion: certificates.k8s.io/v1
|
||||||
|
kind: CertificateSigningRequest
|
||||||
|
metadata:
|
||||||
|
name: casa-user-csr
|
||||||
|
spec:
|
||||||
|
request: $(cat ../.env.d/.kube/casa@icarus-user.csr | base64 | tr -d '\n')
|
||||||
|
signerName: kubernetes.io/kube-apiserver-client
|
||||||
|
usages:
|
||||||
|
- client auth
|
||||||
|
EOF
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Approve csr
|
||||||
|
kubectl certificate approve casa-user-csr
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Download kubernetes user crt
|
||||||
|
kubectl get csr casa-user-csr -o jsonpath='{.status.certificate}' | base64 --decode > ./.env.d/casa@icarus-user.crt
|
||||||
|
```
|
||||||
1
casa-limbosolutions-com/sync-certs-job/README.md
Normal file
1
casa-limbosolutions-com/sync-certs-job/README.md
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
78
casa-limbosolutions-com/sync-certs-job/docker-compose.yaml
Normal file
78
casa-limbosolutions-com/sync-certs-job/docker-compose.yaml
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
|
||||||
|
configs:
|
||||||
|
sync-certs-sh:
|
||||||
|
content: |
|
||||||
|
#!/bin/bash
|
||||||
|
##############################################################################################################################
|
||||||
|
# notes:
|
||||||
|
# --certificate-authority="$${KUBERNETES_CRT_AUTHORITY}" not working, don't know why, so using --insecure-skip-tls-verify
|
||||||
|
##############################################################################################################################
|
||||||
|
|
||||||
|
|
||||||
|
set -e
|
||||||
|
mkdir -p /tmp/.kube
|
||||||
|
|
||||||
|
echo "Trace: Setup kube"
|
||||||
|
|
||||||
|
echo "Trace: Processing KUBERNETES_CRT_AUTHORITY_DATA"
|
||||||
|
base64 -d <<< "${KUBERNETES_CRT_AUTHORITY_DATA}" > "$${KUBERNETES_CRT_AUTHORITY}"
|
||||||
|
echo "Trace: Processing KUBERNETES_CLIENT_CRT_DATA"
|
||||||
|
base64 -d <<< "${KUBERNETES_CLIENT_CRT_DATA}" > "$${KUBERNETES_CLIENT_CRT}"
|
||||||
|
echo "Trace: Processing KUBERNETES_CLIENT_KEY_DATA"
|
||||||
|
base64 -d <<< "${KUBERNETES_CLIENT_KEY_DATA}" > "$${KUBERNETES_CLIENT_KEY}"
|
||||||
|
|
||||||
|
# while true ; do
|
||||||
|
# sleep 5
|
||||||
|
# done
|
||||||
|
|
||||||
|
|
||||||
|
echo "Trace: Fetching secrets"
|
||||||
|
CERT_NAMES=$(kubectl get secrets \
|
||||||
|
-n casa-limbosolutions-com \
|
||||||
|
--server="$${KUBERNETES_SERVER}" \
|
||||||
|
--client-key="$${KUBERNETES_CLIENT_KEY}" \
|
||||||
|
--client-certificate="$${KUBERNETES_CLIENT_CRT}" \
|
||||||
|
--insecure-skip-tls-verify \
|
||||||
|
-o json | jq -r '.items[].metadata.name')
|
||||||
|
|
||||||
|
for CERT_NAME in $$CERT_NAMES; do
|
||||||
|
echo "Trace: Syncing certificate: $$CERT_NAME"
|
||||||
|
kubectl get secret "$$CERT_NAME" \
|
||||||
|
-n casa-limbosolutions-com \
|
||||||
|
--server="$${KUBERNETES_SERVER}" \
|
||||||
|
--client-key="$${KUBERNETES_CLIENT_KEY}" \
|
||||||
|
--client-certificate="$${KUBERNETES_CLIENT_CRT}" \
|
||||||
|
--insecure-skip-tls-verify \
|
||||||
|
-o json | \
|
||||||
|
jq -r '.data | to_entries[] | "\(.key) \(.value)"' | \
|
||||||
|
while IFS=' ' read -r KEY VALUE; do
|
||||||
|
echo "Processing key: $$KEY"
|
||||||
|
# Decode the base64 value and save it to the appropriate file
|
||||||
|
echo "Trace: Saving key: $$KEY"
|
||||||
|
echo "$$VALUE" | base64 -d > "/etc/ssl/certs/casa-limbosolutions-com-certs/$${CERT_NAME}_$${KEY}"
|
||||||
|
done
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "Info: Certificates synced successfully."
|
||||||
|
|
||||||
|
services:
|
||||||
|
kubectl:
|
||||||
|
image: bitnami/kubectl:latest
|
||||||
|
environment:
|
||||||
|
KUBERNETES_SERVER: ${KUBERNETES_SERVER}
|
||||||
|
KUBERNETES_CRT_AUTHORITY: /tmp/.kube/ca.crt
|
||||||
|
KUBERNETES_CLIENT_CRT: /tmp/.kube/client.crt
|
||||||
|
KUBERNETES_CLIENT_KEY: /tmp/.kube/client.key
|
||||||
|
container_name: sync-certs-job
|
||||||
|
entrypoint: bash -c /app/sync-certs.sh
|
||||||
|
configs:
|
||||||
|
- source: sync-certs-sh
|
||||||
|
target: /app/sync-certs.sh
|
||||||
|
mode: 0755
|
||||||
|
volumes:
|
||||||
|
- casa-certs:/etc/ssl/certs/casa-limbosolutions-com-certs:rw
|
||||||
|
volumes:
|
||||||
|
casa-certs:
|
||||||
|
name: casa-limbosolutions-com-certs
|
||||||
|
external: true # Attention: permissions must be set to 1001:1001 (using chown on nginx container command)
|
||||||
|
|
||||||
@@ -1,101 +0,0 @@
|
|||||||
|
|
||||||
{
|
|
||||||
"folders": [
|
|
||||||
{
|
|
||||||
"path": "./",
|
|
||||||
"name": "casa"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"path": "../homeAssistant"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"path": "../lms"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"path": "../mosquitto"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "kb",
|
|
||||||
"path": "../kb"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "pi.bluetooth.speaker",
|
|
||||||
"path": "../pi.bluetooth.speaker"
|
|
||||||
},
|
|
||||||
],
|
|
||||||
"settings": {
|
|
||||||
"files.exclude": {
|
|
||||||
"**/.git": true,
|
|
||||||
"**/.svn": true,
|
|
||||||
"**/.hg": true,
|
|
||||||
"**/CVS": true,
|
|
||||||
"**/.DS_Store": true,
|
|
||||||
"**/Thumbs.db": true,
|
|
||||||
"kb": true,
|
|
||||||
"runme.taskProvider.enabled": false,
|
|
||||||
"runme.scanMode": "off"
|
|
||||||
|
|
||||||
},
|
|
||||||
|
|
||||||
"ansible.python.interpreterPath": "/bin/python",
|
|
||||||
"cSpell.words": [
|
|
||||||
"davidanson",
|
|
||||||
"eamodio",
|
|
||||||
"envsubst",
|
|
||||||
"lmscommunity",
|
|
||||||
"localtime",
|
|
||||||
"LOGLEVEL",
|
|
||||||
"lyrionmusicserver",
|
|
||||||
"mtxr",
|
|
||||||
"rclone",
|
|
||||||
"reverseproxy",
|
|
||||||
"rogalmic",
|
|
||||||
"runme",
|
|
||||||
"sqltools",
|
|
||||||
"yzhang"
|
|
||||||
],
|
|
||||||
"githubLocalActions.workflowsDirectory": ".gitea/workflows"
|
|
||||||
},
|
|
||||||
"tasks": {
|
|
||||||
"version": "2.0.0",
|
|
||||||
"tasks": [
|
|
||||||
{
|
|
||||||
"label": "Run current shell file - relative",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "bash",
|
|
||||||
"args": [
|
|
||||||
"${file}"
|
|
||||||
],
|
|
||||||
"options": {
|
|
||||||
"cwd": "${fileDirname}"
|
|
||||||
},
|
|
||||||
"group": {
|
|
||||||
},
|
|
||||||
"presentation": {
|
|
||||||
"echo": true,
|
|
||||||
"reveal": "always",
|
|
||||||
"focus": false,
|
|
||||||
"panel": "shared"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "Run current shell file",
|
|
||||||
"type": "shell",
|
|
||||||
"command": "bash",
|
|
||||||
"args": [
|
|
||||||
"${file}"
|
|
||||||
],
|
|
||||||
"group": {
|
|
||||||
"kind": "build",
|
|
||||||
"isDefault": true
|
|
||||||
},
|
|
||||||
"presentation": {
|
|
||||||
"echo": true,
|
|
||||||
"reveal": "always",
|
|
||||||
"focus": false,
|
|
||||||
"panel": "shared"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
49
casa.full-stack.code-workspace
Normal file
49
casa.full-stack.code-workspace
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
|
||||||
|
{
|
||||||
|
"folders": [
|
||||||
|
{
|
||||||
|
"path": "./",
|
||||||
|
"name": "homesrv01"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../homeAssistant"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../lms"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../mosquitto"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "kb",
|
||||||
|
"path": "../kb"
|
||||||
|
},
|
||||||
|
// {
|
||||||
|
// "name": "kb/kb",
|
||||||
|
// "path": "../../kb/kb"
|
||||||
|
// }
|
||||||
|
|
||||||
|
|
||||||
|
],
|
||||||
|
"settings": {
|
||||||
|
"files.exclude": {
|
||||||
|
"**/.git": true,
|
||||||
|
"**/.svn": true,
|
||||||
|
"**/.hg": true,
|
||||||
|
"**/CVS": true,
|
||||||
|
"**/.DS_Store": true,
|
||||||
|
"**/Thumbs.db": true,
|
||||||
|
"kb": true,
|
||||||
|
|
||||||
|
},
|
||||||
|
|
||||||
|
"ansible.python.interpreterPath": "/bin/python",
|
||||||
|
"cSpell.words": [
|
||||||
|
"lmscommunity",
|
||||||
|
"localtime",
|
||||||
|
"lyrionmusicserver",
|
||||||
|
"rclone",
|
||||||
|
"reverseproxy"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,166 +0,0 @@
|
|||||||
# Casa k3s cluster
|
|
||||||
|
|
||||||
**Table of Contents:**
|
|
||||||
|
|
||||||
- [Master Node](#master-node)
|
|
||||||
- [Master Node - proxmox vm](#master-node---proxmox-vm)
|
|
||||||
- [Master Node - network configuration](#master-node---network-configuration)
|
|
||||||
- [minion01 - worker node](#minion01---worker-node)
|
|
||||||
- [Minion01 - proxmox vm](#minion01---proxmox-vm)
|
|
||||||
- [Minion01 - k3s -setup](#minion01---k3s--setup)
|
|
||||||
|
|
||||||
**Disable swap:**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
swapoff -a
|
|
||||||
Edit /etc/fstab and comment out any swap entries:
|
|
||||||
# /swapfile none swap sw 0 0
|
|
||||||
```
|
|
||||||
|
|
||||||
**Other Packages:**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
sudo apt update -y && sudo apt install curl btop -y
|
|
||||||
```
|
|
||||||
|
|
||||||
## Master Node
|
|
||||||
|
|
||||||
| Name | Value |
|
|
||||||
| --- | --- |
|
|
||||||
| **NAME** | casa.dev.lan |
|
|
||||||
| **VLAN** | casa |
|
|
||||||
| **IP** | 192.168.14.9 |
|
|
||||||
| **OS** | Debian GNU/Linux 12 (bookworm) |
|
|
||||||
| **k3s** | v1.34.3+k3s1 (48ffa7b6) |
|
|
||||||
| **go** | go1.24.11 |
|
|
||||||
|
|
||||||
### Master Node - proxmox vm
|
|
||||||
|
|
||||||
*hosted on surfacepro.*
|
|
||||||
|
|
||||||
``` yaml
|
|
||||||
agent: 1
|
|
||||||
balloon: 0
|
|
||||||
boot: order=scsi0;ide2;net0
|
|
||||||
cores: 2
|
|
||||||
cpu: host
|
|
||||||
ide2: none,media=cdrom
|
|
||||||
memory: 2355
|
|
||||||
meta: creation-qemu=10.1.2,ctime=1762626497
|
|
||||||
name: casa
|
|
||||||
net0: virtio=BXX:XX:XX:XX:XX:XX,bridge=vmbr0,tag=xx
|
|
||||||
numa: 0
|
|
||||||
onboot: 1
|
|
||||||
ostype: l26
|
|
||||||
scsi0: local-lvm:vm-XXX-disk-0,iothread=1,size=24G,ssd=1
|
|
||||||
scsihw: virtio-scsi-single
|
|
||||||
smbios1: uuid=cxxxx-xxxx-xxxx-xxxx-xxxx
|
|
||||||
sockets: 1
|
|
||||||
usb0: host=1-1.1
|
|
||||||
```
|
|
||||||
|
|
||||||
### Master Node - network configuration
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
ip a # check ethernet name
|
|
||||||
|
|
||||||
# removes automatic configuration as dhcp client
|
|
||||||
sed -i '/ens18/d' /etc/network/interfaces
|
|
||||||
|
|
||||||
cat <<EOF > /etc/network/interfaces.d/ens18
|
|
||||||
# my network configuration
|
|
||||||
auto ens18
|
|
||||||
iface ens18 inet static
|
|
||||||
address 192.168.14.9/24
|
|
||||||
gateway 192.168.0.1
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /etc/resolv.conf
|
|
||||||
domain dev.lan
|
|
||||||
search dev.lan. lan.
|
|
||||||
nameserver 192.168.14.1
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
curl -sfL https://get.k3s.io | sh -
|
|
||||||
```
|
|
||||||
|
|
||||||
**Taint NoSchedule on master node:**
|
|
||||||
|
|
||||||
kubectl taint nodes <master-node-name> node-role.kubernetes.io/control-plane=:NoSchedule
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
kubectl taint nodes casa node-role.kubernetes.io/control-plane=:NoSchedule
|
|
||||||
```
|
|
||||||
|
|
||||||
## minion01 - worker node
|
|
||||||
|
|
||||||
### Minion01 - proxmox vm
|
|
||||||
|
|
||||||
*hosted on gaia.*
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
agent: 1
|
|
||||||
balloon: 0
|
|
||||||
boot: order=scsi0;ide2;net0
|
|
||||||
cores: 4
|
|
||||||
cpu: host
|
|
||||||
ide2: none,media=cdrom
|
|
||||||
memory: 4096
|
|
||||||
meta: creation-qemu=10.1.2,ctime=1763219351
|
|
||||||
name: casa-minion-01
|
|
||||||
net0: virtio=BXX:XX:XX:XX:XX:XX,bridge=vmbr0,tag=xx
|
|
||||||
numa: 0
|
|
||||||
onboot: 1
|
|
||||||
ostype: l26
|
|
||||||
scsi0: fastcore:vm-XXX-disk-0,iothread=1,size=8G,ssd=1
|
|
||||||
scsi1: fastcore:vm-XXX-disk-1,iothread=1,size=16G,ssd=1
|
|
||||||
scsihw: virtio-scsi-single
|
|
||||||
smbios1: xxxx-xxxx-xxxx-xxxx-xxxx
|
|
||||||
sockets: 1
|
|
||||||
```
|
|
||||||
|
|
||||||
| Name | Value |
|
|
||||||
| --- | --- |
|
|
||||||
| **NAME** | minion01 |
|
|
||||||
| **VLAN** | casa |
|
|
||||||
| **IP** | 192.168.14.10 |
|
|
||||||
| **OS** | Debian GNU/Linux 12 (bookworm) |
|
|
||||||
| **k3s** | v1.34.3+k3s1 (48ffa7b6) |
|
|
||||||
| **go** | go1.24.11 |
|
|
||||||
|
|
||||||
### Minion01 - k3s -setup
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
# install k3s as agent / worker node
|
|
||||||
# execute on server to get token
|
|
||||||
# cat /var/lib/rancher/k3s/server/node-token
|
|
||||||
|
|
||||||
TOKEN="???"
|
|
||||||
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --data-dir /dataDisk/k3s --server https://casa.dev.lan:6443 --token ${TOKEN}" sh -s -
|
|
||||||
```
|
|
||||||
|
|
||||||
Edit the `local-path-config` ConfigMap in the `kube-system` namespace (`kubectl -n kube-system edit configmap local-path-config`) to set the provisioner storage path.
|
|
||||||
|
|
||||||
``` yaml
|
|
||||||
config.json: |-
|
|
||||||
{
|
|
||||||
"nodePathMap":[
|
|
||||||
{
|
|
||||||
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
|
|
||||||
"paths":["/var/lib/rancher/k3s/storage"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"node":"casa-minion-01",
|
|
||||||
"paths":["/dataDisk/k3s/storage"]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Set node labels:**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
kubectl label node casa-minion-01 role=worker-node
|
|
||||||
```
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
# Deploy
|
|
||||||
|
|
||||||
```bash
|
|
||||||
set -a
|
|
||||||
source ./.env
|
|
||||||
set +a
|
|
||||||
envsubst < ./secrets.yaml | kubectl apply -f -
|
|
||||||
kubectl apply -f deploy.yaml
|
|
||||||
```
|
|
||||||
@@ -1,235 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: casa-vlan-cicd
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
namespace: casa-vlan-cicd
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
labels:
|
|
||||||
app: casa-vlan-cicd-runners
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: casa-vlan-cicd-runners
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: casa-vlan-cicd-runners
|
|
||||||
spec:
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
containers:
|
|
||||||
- name: mylimbo-casa-vlan-cicd-runners
|
|
||||||
image: git.limbosolutions.com/kb/gitea/act_runner:0.2.13-network-stack
|
|
||||||
imagePullPolicy: Always
|
|
||||||
env:
|
|
||||||
- name: LOG_LEVEL
|
|
||||||
value: "trace"
|
|
||||||
|
|
||||||
- name: CONFIG_FILE
|
|
||||||
value: /config.yaml
|
|
||||||
- name: GITEA_INSTANCE_URL
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_INSTANCE_URL
|
|
||||||
- name: GITEA_RUNNER_REGISTRATION_TOKEN
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN
|
|
||||||
- name: GITEA_RUNNER_NAME
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_MYLIMBO_RUNNER_NAME
|
|
||||||
|
|
||||||
- name: GITEA_RUNNER_CAPACITY
|
|
||||||
value: "1"
|
|
||||||
- name: GITEA_RUNNER_EPHEMERAL
|
|
||||||
value: "0"
|
|
||||||
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: "128Mi"
|
|
||||||
cpu: "100m"
|
|
||||||
limits:
|
|
||||||
memory: "512Mi"
|
|
||||||
cpu: "400m"
|
|
||||||
volumeMounts:
|
|
||||||
- name: config-map
|
|
||||||
mountPath: /config.yaml
|
|
||||||
subPath: config.yaml
|
|
||||||
|
|
||||||
- name: mf-casa-vlan-cicd-runners
|
|
||||||
image: git.limbosolutions.com/kb/gitea/act_runner:0.2.13-network-stack
|
|
||||||
imagePullPolicy: Always
|
|
||||||
env:
|
|
||||||
- name: LOG_LEVEL
|
|
||||||
value: "trace"
|
|
||||||
|
|
||||||
- name: CONFIG_FILE
|
|
||||||
value: /config.yaml
|
|
||||||
- name: GITEA_INSTANCE_URL
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_INSTANCE_URL
|
|
||||||
- name: GITEA_RUNNER_REGISTRATION_TOKEN
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_MF_RUNNER_REGISTRATION_TOKEN
|
|
||||||
- name: GITEA_RUNNER_NAME
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
key: GITEA_MF_RUNNER_NAME
|
|
||||||
|
|
||||||
- name: GITEA_RUNNER_CAPACITY
|
|
||||||
value: "1"
|
|
||||||
- name: GITEA_RUNNER_EPHEMERAL
|
|
||||||
value: "0"
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: "128Mi"
|
|
||||||
cpu: "100m"
|
|
||||||
limits:
|
|
||||||
memory: "512Mi"
|
|
||||||
cpu: "400m"
|
|
||||||
volumeMounts:
|
|
||||||
- name: config-map
|
|
||||||
mountPath: /config.yaml
|
|
||||||
subPath: config.yaml
|
|
||||||
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
- name: config-map
|
|
||||||
configMap:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
namespace: casa-vlan-cicd
|
|
||||||
data:
|
|
||||||
config.yaml: |
|
|
||||||
# Example configuration file, it's safe to copy this as the default config file without any modification.
|
|
||||||
|
|
||||||
# You don't have to copy this file to your instance,
|
|
||||||
# just run `./act_runner generate-config > config.yaml` to generate a config file.
|
|
||||||
|
|
||||||
log:
|
|
||||||
# The level of logging, can be trace, debug, info, warn, error, fatal
|
|
||||||
level: info
|
|
||||||
|
|
||||||
runner:
|
|
||||||
executor: shell
|
|
||||||
# Where to store the registration result.
|
|
||||||
file: .runner
|
|
||||||
# Execute how many tasks concurrently at the same time.
|
|
||||||
capacity: 1
|
|
||||||
# Extra environment variables to run jobs.
|
|
||||||
envs:
|
|
||||||
A_TEST_ENV_NAME_1: a_test_env_value_1
|
|
||||||
A_TEST_ENV_NAME_2: a_test_env_value_2
|
|
||||||
# Extra environment variables to run jobs from a file.
|
|
||||||
# It will be ignored if it's empty or the file doesn't exist.
|
|
||||||
env_file: .env
|
|
||||||
# The timeout for a job to be finished.
|
|
||||||
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
|
|
||||||
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
|
|
||||||
timeout: 3h
|
|
||||||
# The timeout for the runner to wait for running jobs to finish when shutting down.
|
|
||||||
# Any running jobs that haven't finished after this timeout will be cancelled.
|
|
||||||
shutdown_timeout: 0s
|
|
||||||
# Whether skip verifying the TLS certificate of the Gitea instance.
|
|
||||||
insecure: false
|
|
||||||
# The timeout for fetching the job from the Gitea instance.
|
|
||||||
fetch_timeout: 5s
|
|
||||||
# The interval for fetching the job from the Gitea instance.
|
|
||||||
fetch_interval: 2s
|
|
||||||
# The github_mirror of a runner is used to specify the mirror address of the github that pulls the action repository.
|
|
||||||
# It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
|
|
||||||
# and github_mirror is not empty. In this case,
|
|
||||||
# it replaces https://github.com with the value here, which is useful for some special network environments.
|
|
||||||
github_mirror: ''
|
|
||||||
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
|
|
||||||
# Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
|
|
||||||
# Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
|
|
||||||
# If it's empty when registering, it will ask for inputting labels.
|
|
||||||
# If it's empty when execute `daemon`, will use labels in `.runner` file.
|
|
||||||
labels:
|
|
||||||
- "casa-vlan-cicd"
|
|
||||||
|
|
||||||
cache:
|
|
||||||
# Enable cache server to use actions/cache.
|
|
||||||
enabled: true
|
|
||||||
# The directory to store the cache data.
|
|
||||||
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
|
|
||||||
dir: ""
|
|
||||||
# The host of the cache server.
|
|
||||||
# It's not for the address to listen, but the address to connect from job containers.
|
|
||||||
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
|
|
||||||
host: ""
|
|
||||||
# The port of the cache server.
|
|
||||||
# 0 means to use a random available port.
|
|
||||||
port: 0
|
|
||||||
# The external cache server URL. Valid only when enable is true.
|
|
||||||
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
|
|
||||||
# The URL should generally end with "/".
|
|
||||||
external_server: ""
|
|
||||||
|
|
||||||
container:
|
|
||||||
# Specifies the network to which the container will connect.
|
|
||||||
# Could be host, bridge or the name of a custom network.
|
|
||||||
# If it's empty, act_runner will create a network automatically.
|
|
||||||
network: ""
|
|
||||||
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
|
|
||||||
privileged: false
|
|
||||||
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
|
|
||||||
options:
|
|
||||||
# The parent directory of a job's working directory.
|
|
||||||
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
|
|
||||||
# If the path starts with '/', the '/' will be trimmed.
|
|
||||||
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
|
|
||||||
# If it's empty, /workspace will be used.
|
|
||||||
workdir_parent:
|
|
||||||
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
|
|
||||||
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
|
|
||||||
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
|
|
||||||
# valid_volumes:
|
|
||||||
# - data
|
|
||||||
# - /src/*.json
|
|
||||||
# If you want to allow any volume, please use the following configuration:
|
|
||||||
# valid_volumes:
|
|
||||||
# - '**'
|
|
||||||
valid_volumes: []
|
|
||||||
# overrides the docker client host with the specified one.
|
|
||||||
# If it's empty, act_runner will find an available docker host automatically.
|
|
||||||
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
|
|
||||||
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
|
|
||||||
docker_host: ""
|
|
||||||
# Pull docker image(s) even if already present
|
|
||||||
force_pull: true
|
|
||||||
# Rebuild docker image(s) even if already present
|
|
||||||
force_rebuild: false
|
|
||||||
# Always require a reachable docker daemon, even if not required by act_runner
|
|
||||||
require_docker: false
|
|
||||||
# Timeout to wait for the docker daemon to be reachable, if docker is required by require_docker or act_runner
|
|
||||||
docker_timeout: 0s
|
|
||||||
|
|
||||||
host:
|
|
||||||
# The parent directory of a job's working directory.
|
|
||||||
# If it's empty, $HOME/.cache/act/ will be used.
|
|
||||||
workdir_parent:
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: casa-vlan-cicd-runners
|
|
||||||
namespace: casa-vlan-cicd
|
|
||||||
stringData:
|
|
||||||
GITEA_INSTANCE_URL: ${GITEA_INSTANCE_URL}
|
|
||||||
GITEA_RUNNER_REGISTRATION_TOKEN: ${GITEA_RUNNER_REGISTRATION_TOKEN}
|
|
||||||
GITEA_MYLIMBO_RUNNER_NAME: ${GITEA_MYLIMBO_RUNNER_NAME}
|
|
||||||
GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN: ${GITEA_MYLIMBO_RUNNER_REGISTRATION_TOKEN}
|
|
||||||
GITEA_MF_RUNNER_NAME: ${GITEA_MF_RUNNER_NAME}
|
|
||||||
GITEA_MF_RUNNER_REGISTRATION_TOKEN: ${GITEA_MF_RUNNER_REGISTRATION_TOKEN}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
# monitoring
|
|
||||||
|
|
||||||
## namespace
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
kubectl create namespace monitoring
|
|
||||||
```
|
|
||||||
|
|
||||||
## promtail
|
|
||||||
|
|
||||||
### setup
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
# add repo
|
|
||||||
helm repo add grafana https://grafana.github.io/helm-charts
|
|
||||||
# Install & Upgrade
|
|
||||||
helm upgrade --install promtail grafana/promtail --namespace monitoring \
|
|
||||||
--values=./promtail/values.yaml \
|
|
||||||
--values=./promtail/values.local.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Continuous Deploy
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
|
|
||||||
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: Role
|
|
||||||
metadata:
|
|
||||||
namespace: monitoring
|
|
||||||
name: ci-cd
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["pods", "services", "secrets", "configmaps", "persistentvolumeclaims", "endpoints", "serviceaccounts"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["apps"]
|
|
||||||
resources: ["deployments", "statefulsets","daemonsets"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["traefik.io"]
|
|
||||||
resources: ["ingressroutes"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
|
|
||||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
|
||||||
resources: ["clusterroles", "clusterrolebindings"]
|
|
||||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
|
||||||
|
|
||||||
|
|
||||||
# telegraf
|
|
||||||
- apiGroups: ["monitoring.coreos.com"]
|
|
||||||
resources: ["servicemonitors", "podmonitors", "prometheuses", "alertmanagers"]
|
|
||||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ci-cd
|
|
||||||
namespace: monitoring
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: casa-ci-cd
|
|
||||||
namespace: home-assistant
|
|
||||||
roleRef:
|
|
||||||
kind: Role
|
|
||||||
name: ci-cd
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: ci-cd-cluster
|
|
||||||
rules:
|
|
||||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
|
||||||
resources: ["clusterroles", "clusterrolebindings"]
|
|
||||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ci-cd-clusterbinding
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: casa-ci-cd
|
|
||||||
namespace: home-assistant
|
|
||||||
roleRef:
|
|
||||||
kind: ClusterRole
|
|
||||||
name: ci-cd-cluster
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
|
|
||||||
---
|
|
||||||
# telegraf
|
|
||||||
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: Role
|
|
||||||
metadata:
|
|
||||||
namespace: kube-system
|
|
||||||
name: ci-cd-kube-system
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["services"]
|
|
||||||
verbs: ["get", "list", "create", "update", "patch", "delete"]
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ci-cd-kube-system
|
|
||||||
namespace: kube-system
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: casa-ci-cd
|
|
||||||
namespace: home-assistant
|
|
||||||
roleRef:
|
|
||||||
kind: Role
|
|
||||||
name: ci-cd-kube-system
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
# Prometheus Setup
|
|
||||||
|
|
||||||
- <https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack>
|
|
||||||
- <https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml>
|
|
||||||
|
|
||||||
## helm chart
|
|
||||||
|
|
||||||
```bash
|
|
||||||
#add repo
|
|
||||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
|
||||||
```
|
|
||||||
|
|
||||||
**This helm chart, installs:**
|
|
||||||
|
|
||||||
- crd
|
|
||||||
- Operator
|
|
||||||
- kubernetes services monitors
|
|
||||||
|
|
||||||
```bash
|
|
||||||
kubectl get namespace monitoring || kubectl create namespace monitoring
|
|
||||||
helm upgrade --install prometheus-stack prometheus-community/kube-prometheus-stack \
|
|
||||||
--namespace monitoring \
|
|
||||||
--values=./helm/01-only-crd-and-operator.yaml \
|
|
||||||
--values=./helm/02-kube-metrics.yaml \
|
|
||||||
--values=./helm/03-node-exporter.yaml \
|
|
||||||
--values=./helm/04-kubelet.yaml \
|
|
||||||
--values=./helm/10-testing-values.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
## deploy prometheus agent
|
|
||||||
|
|
||||||
```bash
|
|
||||||
kubectl apply -f ./prometheus-agent.yaml
|
|
||||||
```
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
# values.yaml to install only Prometheus Operator and CRDs
|
|
||||||
|
|
||||||
# Disable all components except the operator
|
|
||||||
defaultRules:
|
|
||||||
create: false
|
|
||||||
|
|
||||||
alertmanager:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
grafana:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
kubeStateMetrics:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
nodeExporter:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
prometheus:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
coreDns:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
kubeControllerManager:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
kubeEtcd:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
kubeProxy:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
kubeScheduler:
|
|
||||||
enabled: false
|
|
||||||
|
|
||||||
prometheusOperator:
|
|
||||||
enabled: true
|
|
||||||
createCustomResource: true
|
|
||||||
tls:
|
|
||||||
enabled: false
|
|
||||||
admissionWebhooks:
|
|
||||||
enabled: false
|
|
||||||
cleanupCustomResource: false
|
|
||||||
serviceMonitor:
|
|
||||||
selfMonitor: false
|
|
||||||
kubeletService:
|
|
||||||
enabled: true
|
|
||||||
# requires manual creation of service #prom-kublet-service
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
|
|
||||||
# global:
|
|
||||||
# nodeSelector:
|
|
||||||
# dedicated: worker-node
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
kubeStateMetrics:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
kube-state-metrics: # ok tested!
|
|
||||||
podLabels:
|
|
||||||
role: worker-node
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
prometheus:
|
|
||||||
monitor:
|
|
||||||
relabelings:
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
additionalLabels:
|
|
||||||
app.kubernetes.io/name: prometheus-kube-state-metrics # !important: selector used by agent
|
|
||||||
|
|
||||||
|
|
||||||
coreDns: # ok tested!
|
|
||||||
enabled: true
|
|
||||||
serviceMonitor:
|
|
||||||
relabelings:
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
additionalLabels:
|
|
||||||
app.kubernetes.io/name: prometheus-stack-coredns # !important: selector used by agent
|
|
||||||
|
|
||||||
kubeApiServer: # ok tested!
|
|
||||||
enabled: true
|
|
||||||
serviceMonitor:
|
|
||||||
relabelings:
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
additionalLabels:
|
|
||||||
app.kubernetes.io/name: prometheus-stack-apiserver # !important: selector used by agent
|
|
||||||
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
# Deploy node exporter as a daemonset to all nodes
|
|
||||||
nodeExporter:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
|
|
||||||
# job node exporter
|
|
||||||
prometheus-node-exporter:
|
|
||||||
prometheus:
|
|
||||||
monitor:
|
|
||||||
enabled: true
|
|
||||||
relabelings:
|
|
||||||
# https://github.com/dotdc/grafana-dashboards-kubernetes
|
|
||||||
- action: replace
|
|
||||||
sourceLabels: [__meta_kubernetes_pod_node_name]
|
|
||||||
targetLabel: nodename
|
|
||||||
|
|
||||||
# identification of cluster
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
# Used file to testing new options and configurations
|
|
||||||
# Should be the laste file to be loaded
|
|
||||||
kubelet:
|
|
||||||
enabled: true
|
|
||||||
namespace: kube-system
|
|
||||||
serviceMonitor:
|
|
||||||
interval: 30s #WARN: Error on ingesting out-of-order samples. https://github.com/prometheus-community/helm-charts/issues/5483
|
|
||||||
enabled: true
|
|
||||||
## Enable scraping /metrics from kubelet's service
|
|
||||||
kubelet: true
|
|
||||||
additionalLabels:
|
|
||||||
app.kubernetes.io/name: prometheus-kubelet # !important: selector used by agent
|
|
||||||
|
|
||||||
probesMetricRelabelings:
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
|
|
||||||
- sourceLabels: [__name__, image]
|
|
||||||
separator: ;
|
|
||||||
regex: container_([a-z_]+);
|
|
||||||
replacement: $1
|
|
||||||
action: drop
|
|
||||||
- sourceLabels: [__name__]
|
|
||||||
separator: ;
|
|
||||||
regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
|
|
||||||
replacement: $1
|
|
||||||
action: drop
|
|
||||||
|
|
||||||
|
|
||||||
# # RelabelConfigs to apply to samples before scraping
|
|
||||||
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
|
|
||||||
# #
|
|
||||||
# # metrics_path is required to match upstream rules and charts
|
|
||||||
cAdvisorRelabelings:
|
|
||||||
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
|
|
||||||
- action: replace
|
|
||||||
sourceLabels: [__metrics_path__]
|
|
||||||
targetLabel: metrics_path
|
|
||||||
- sourceLabels: [__meta_kubernetes_pod_node_name]
|
|
||||||
separator: ;
|
|
||||||
regex: ^(.*)$
|
|
||||||
targetLabel: nodename
|
|
||||||
replacement: $1
|
|
||||||
action: replace
|
|
||||||
|
|
||||||
# # RelabelConfigs to apply to samples before scraping
|
|
||||||
# # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
|
|
||||||
# #
|
|
||||||
probesRelabelings:
|
|
||||||
|
|
||||||
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
|
|
||||||
- action: replace
|
|
||||||
sourceLabels: [__metrics_path__]
|
|
||||||
targetLabel: metrics_path
|
|
||||||
|
|
||||||
- sourceLabels: [__meta_kubernetes_pod_node_name]
|
|
||||||
separator: ;
|
|
||||||
regex: ^(.*)$
|
|
||||||
targetLabel: nodename
|
|
||||||
replacement: $1
|
|
||||||
action: replace
|
|
||||||
|
|
||||||
resourceRelabelings:
|
|
||||||
|
|
||||||
- targetLabel: cluster
|
|
||||||
replacement: casa
|
|
||||||
|
|
||||||
- action: replace
|
|
||||||
sourceLabels: [__metrics_path__]
|
|
||||||
targetLabel: metrics_path
|
|
||||||
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
# use for testing
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,71 +0,0 @@
|
|||||||
|
|
||||||
apiVersion: monitoring.coreos.com/v1alpha1
|
|
||||||
kind: PrometheusAgent
|
|
||||||
metadata:
|
|
||||||
name: prometheus-agent
|
|
||||||
namespace: monitoring
|
|
||||||
spec:
|
|
||||||
serviceMonitorNamespaceSelector: {}
|
|
||||||
podMonitorNamespaceSelector: {}
|
|
||||||
serviceMonitorSelector:
|
|
||||||
matchExpressions:
|
|
||||||
- key: app.kubernetes.io/name
|
|
||||||
operator: Exists
|
|
||||||
podMonitorSelector:
|
|
||||||
matchExpressions:
|
|
||||||
- key: app.kubernetes.io/name
|
|
||||||
operator: Exists
|
|
||||||
replicas: 1
|
|
||||||
remoteWrite:
|
|
||||||
- url: https://prometheus.monitoring.limbosolutions.com/api/v1/write
|
|
||||||
scrapeInterval: 60s
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 128Mi
|
|
||||||
limits:
|
|
||||||
cpu: 200m
|
|
||||||
memory: 512Mi
|
|
||||||
serviceAccountName: prometheus-agent
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: prometheus-agent
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["nodes", "nodes/metrics", "nodes/proxy", "services", "endpoints", "pods"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- apiGroups: ["monitoring.coreos.com"]
|
|
||||||
resources: ["servicemonitors", "podmonitors"]
|
|
||||||
verbs: ["get", "list", "watch"]
|
|
||||||
- nonResourceURLs:
|
|
||||||
- /metrics
|
|
||||||
- /metrics/cadvisor
|
|
||||||
- /metrics/probes
|
|
||||||
verbs: ["get"]
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: prometheus-agent-monitoring
|
|
||||||
roleRef:
|
|
||||||
kind: ClusterRole
|
|
||||||
name: prometheus-agent
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: prometheus-agent
|
|
||||||
namespace: monitoring
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: prometheus-agent
|
|
||||||
namespace: monitoring
|
|
||||||
1
monitoring/promtail/.gitignore
vendored
1
monitoring/promtail/.gitignore
vendored
@@ -1 +0,0 @@
|
|||||||
**.local.**
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
config:
|
|
||||||
clients:
|
|
||||||
- url: "????" #replaced values.local.yaml. Example: https://lokiserver/loki/api/v1/push
|
|
||||||
# by default all scrap configs had node_name
|
|
||||||
snippets:
|
|
||||||
extraRelabelConfigs:
|
|
||||||
- target_label: host
|
|
||||||
replacement: ${HOSTNAME}
|
|
||||||
- target_label: cluster
|
|
||||||
replacement: casa
|
|
||||||
|
|
||||||
extraScrapeConfigs: |
|
|
||||||
#scrape config for syslog
|
|
||||||
- job_name: host-journald
|
|
||||||
journal:
|
|
||||||
json: true
|
|
||||||
max_age: 24h
|
|
||||||
path: /var/log/host/journal
|
|
||||||
labels:
|
|
||||||
job: journald
|
|
||||||
relabel_configs:
|
|
||||||
- source_labels: ['__journal__systemd_unit']
|
|
||||||
target_label: 'journal_systemd_unit'
|
|
||||||
- source_labels: ['__journal_syslog_identifier']
|
|
||||||
target_label: 'journal_syslog_identifier'
|
|
||||||
- source_labels: ['__journal__hostname']
|
|
||||||
target_label: 'journal_hostname'
|
|
||||||
- target_label: 'host'
|
|
||||||
replacement: '${HOSTNAME}'
|
|
||||||
- target_label: 'cluster'
|
|
||||||
replacement: 'casa'
|
|
||||||
|
|
||||||
extraArgs:
|
|
||||||
- --config.expand-env=true
|
|
||||||
extraVolumes:
|
|
||||||
- name: node-logs
|
|
||||||
hostPath:
|
|
||||||
path: /var/log
|
|
||||||
|
|
||||||
extraVolumeMounts:
|
|
||||||
- name: node-logs
|
|
||||||
mountPath: /var/log/host
|
|
||||||
readOnly: true
|
|
||||||
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
cpu: 200m
|
|
||||||
memory: 100Mi
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 50Mi
|
|
||||||
|
|
||||||
40
rclone.docker-plugin.playbook.yaml
Normal file
40
rclone.docker-plugin.playbook.yaml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
- name: Setup rclone docker plugin
|
||||||
|
become: true
|
||||||
|
vars:
|
||||||
|
# rclone_docker_plugin_config: |
|
||||||
|
# [s3-limbo-storage]
|
||||||
|
# type = s3
|
||||||
|
# provider = SeaweedFS
|
||||||
|
# access_key_id = !!! SET ON INVENTORY !!!
|
||||||
|
# secret_access_key = !!! SET ON INVENTORY !!!
|
||||||
|
# endpoint = !!! SET ON INVENTORY !!!
|
||||||
|
|
||||||
|
hosts:
|
||||||
|
- homesrv01
|
||||||
|
tasks:
|
||||||
|
|
||||||
|
- name: Enforce folders
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /var/lib/docker-plugins/rclone/config
|
||||||
|
state: directory
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: u=rwx,g=r,o-rwx
|
||||||
|
recurse: true
|
||||||
|
|
||||||
|
- name: Enforce folders
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /var/lib/docker-plugins/rclone/cache
|
||||||
|
state: directory
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: u=rwx,g=r,o-rwx
|
||||||
|
recurse: true
|
||||||
|
|
||||||
|
- name: Setup rclone s3
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /var/lib/docker-plugins/rclone/config/rclone.conf
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: u=rwx,g-rwx,o-rwx
|
||||||
|
content: "{{ rclone_docker_plugin_config }}"
|
||||||
140
services/myLimbo-casa-gitea-act-runner/docker-compose.yml
Normal file
140
services/myLimbo-casa-gitea-act-runner/docker-compose.yml
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
services:
|
||||||
|
act-runner:
|
||||||
|
container_name: myLimbo-casa-gitea-act-runner
|
||||||
|
image: docker.io/gitea/act_runner:latest
|
||||||
|
restart: always
|
||||||
|
volumes:
|
||||||
|
- myLimbo-casa-gitea-act-runner-data:/data
|
||||||
|
- myLimbo-casa-gitea-act-runner-config:/config
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
|
||||||
|
environment:
|
||||||
|
- GITEA_INSTANCE_URL=${GITEA_INSTANCE_URL}
|
||||||
|
# When using Docker Secrets, it's also possible to use
|
||||||
|
# GITEA_RUNNER_REGISTRATION_TOKEN_FILE to pass the location.
|
||||||
|
# The env var takes precedence.
|
||||||
|
# Needed only for the first start.
|
||||||
|
- CONFIG_FILE= /config/config.yaml
|
||||||
|
- GITEA_RUNNER_REGISTRATION_TOKEN=${GITEA_RUNNER_REGISTRATION_TOKEN}
|
||||||
|
- GITEA_RUNNER_NAME=myLimbo-casa-gitea-act-runner
|
||||||
|
#- GITEA_RUNNER_CONFIG_FILE="/config/config.yaml"
|
||||||
|
|
||||||
|
|
||||||
|
configs:
|
||||||
|
- source: act-runner-config
|
||||||
|
target: /config/config.yaml
|
||||||
|
mode: 0444
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
myLimbo-casa-gitea-act-runner-data:
|
||||||
|
name: myLimbo-casa-gitea-act-runner-data
|
||||||
|
myLimbo-casa-gitea-act-runner-config:
|
||||||
|
name: myLimbo-casa-gitea-act-runner-config
|
||||||
|
|
||||||
|
|
||||||
|
configs:
|
||||||
|
act-runner-config:
|
||||||
|
content: |
|
||||||
|
# Example configuration file, it's safe to copy this as the default config file without any modification.
|
||||||
|
|
||||||
|
# You don't have to copy this file to your instance,
|
||||||
|
# just run `./act_runner generate-config > config.yaml` to generate a config file.
|
||||||
|
|
||||||
|
log:
|
||||||
|
# The level of logging, can be trace, debug, info, warn, error, fatal
|
||||||
|
level: info
|
||||||
|
|
||||||
|
runner:
|
||||||
|
# Where to store the registration result.
|
||||||
|
file: .runner
|
||||||
|
# Execute how many tasks concurrently at the same time.
|
||||||
|
capacity: 1
|
||||||
|
# Extra environment variables to run jobs.
|
||||||
|
envs:
|
||||||
|
A_TEST_ENV_NAME_1: a_test_env_value_1
|
||||||
|
A_TEST_ENV_NAME_2: a_test_env_value_2
|
||||||
|
# Extra environment variables to run jobs from a file.
|
||||||
|
# It will be ignored if it's empty or the file doesn't exist.
|
||||||
|
env_file: .env
|
||||||
|
# The timeout for a job to be finished.
|
||||||
|
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
|
||||||
|
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
|
||||||
|
timeout: 3h
|
||||||
|
# The timeout for the runner to wait for running jobs to finish when shutting down.
|
||||||
|
# Any running jobs that haven't finished after this timeout will be cancelled.
|
||||||
|
shutdown_timeout: 0s
|
||||||
|
# Whether skip verifying the TLS certificate of the Gitea instance.
|
||||||
|
insecure: false
|
||||||
|
# The timeout for fetching the job from the Gitea instance.
|
||||||
|
fetch_timeout: 5s
|
||||||
|
# The interval for fetching the job from the Gitea instance.
|
||||||
|
fetch_interval: 2s
|
||||||
|
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
|
||||||
|
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
|
||||||
|
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
|
||||||
|
# If it's empty when registering, it will ask for inputting labels.
|
||||||
|
# If it's empty when execute `daemon`, will use labels in `.runner` file.
|
||||||
|
labels:
|
||||||
|
- "myLimbo-casa-gitea-act-runner:docker://gitea/runner-images:ubuntu-latest"
|
||||||
|
#- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
|
||||||
|
#- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
|
||||||
|
#- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
|
||||||
|
|
||||||
|
cache:
|
||||||
|
# Enable cache server to use actions/cache.
|
||||||
|
enabled: true
|
||||||
|
# The directory to store the cache data.
|
||||||
|
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
|
||||||
|
dir: ""
|
||||||
|
# The host of the cache server.
|
||||||
|
# It's not for the address to listen, but the address to connect from job containers.
|
||||||
|
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
|
||||||
|
host: ""
|
||||||
|
# The port of the cache server.
|
||||||
|
# 0 means to use a random available port.
|
||||||
|
port: 0
|
||||||
|
# The external cache server URL. Valid only when enable is true.
|
||||||
|
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
|
||||||
|
# The URL should generally end with "/".
|
||||||
|
external_server: ""
|
||||||
|
|
||||||
|
container:
|
||||||
|
# Specifies the network to which the container will connect.
|
||||||
|
# Could be host, bridge or the name of a custom network.
|
||||||
|
# If it's empty, act_runner will create a network automatically.
|
||||||
|
network: ""
|
||||||
|
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
|
||||||
|
privileged: false
|
||||||
|
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
|
||||||
|
options:
|
||||||
|
# The parent directory of a job's working directory.
|
||||||
|
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
|
||||||
|
# If the path starts with '/', the '/' will be trimmed.
|
||||||
|
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
|
||||||
|
# If it's empty, /workspace will be used.
|
||||||
|
workdir_parent:
|
||||||
|
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
|
||||||
|
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
|
||||||
|
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
|
||||||
|
# valid_volumes:
|
||||||
|
# - data
|
||||||
|
# - /src/*.json
|
||||||
|
# If you want to allow any volume, please use the following configuration:
|
||||||
|
# valid_volumes:
|
||||||
|
# - '**'
|
||||||
|
valid_volumes: []
|
||||||
|
# overrides the docker client host with the specified one.
|
||||||
|
# If it's empty, act_runner will find an available docker host automatically.
|
||||||
|
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
|
||||||
|
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
|
||||||
|
docker_host: ""
|
||||||
|
# Pull docker image(s) even if already present
|
||||||
|
force_pull: true
|
||||||
|
# Rebuild docker image(s) even if already present
|
||||||
|
force_rebuild: false
|
||||||
|
|
||||||
|
host:
|
||||||
|
# The parent directory of a job's working directory.
|
||||||
|
# If it's empty, $HOME/.cache/act/ will be used.
|
||||||
|
workdir_parent:
|
||||||
|
|
||||||
41
services/nginx/docker-compose.yaml
Normal file
41
services/nginx/docker-compose.yaml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
configs:
|
||||||
|
run-sh:
|
||||||
|
content: |
|
||||||
|
#!/bin/sh
|
||||||
|
# patch security so kubctl on sync-certs-job can write to the mounted volume
|
||||||
|
chown -R 1001:1001 /etc/ssl/certs/casa-limbosolutions-com-certs
|
||||||
|
while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g "daemon off;"
|
||||||
|
|
||||||
|
services:
|
||||||
|
nginx:
|
||||||
|
build:
|
||||||
|
context: ./docker
|
||||||
|
pull_policy: build
|
||||||
|
image: homesrv/nginx:latest
|
||||||
|
volumes:
|
||||||
|
- casa-limbosolutions-com-certs:/etc/ssl/certs/casa-limbosolutions-com-certs
|
||||||
|
ports:
|
||||||
|
- 443:443
|
||||||
|
- 80:80
|
||||||
|
networks:
|
||||||
|
- public
|
||||||
|
restart: unless-stopped
|
||||||
|
command: /bin/sh -c '/run.sh'
|
||||||
|
configs:
|
||||||
|
- source: run-sh
|
||||||
|
target: /run.sh
|
||||||
|
mode: 0755
|
||||||
|
volumes:
|
||||||
|
nginx-conf.d:
|
||||||
|
|
||||||
|
casa-limbosolutions-com-certs:
|
||||||
|
name: casa-limbosolutions-com-certs
|
||||||
|
external: false
|
||||||
|
|
||||||
|
networks:
|
||||||
|
public:
|
||||||
|
name: reverseproxy_public
|
||||||
|
external: true
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
4
services/nginx/docker/Dockerfile
Normal file
4
services/nginx/docker/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
FROM nginx:latest
|
||||||
|
|
||||||
|
COPY nginx.conf.d/* /etc/nginx/conf.d
|
||||||
|
|
||||||
35
services/nginx/docker/nginx.conf.d/has.conf
Normal file
35
services/nginx/docker/nginx.conf.d/has.conf
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
map $http_upgrade $connection_upgrade {
|
||||||
|
default upgrade;
|
||||||
|
'' close;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name casa.limbosolutions.com *.casa.limbosolutions.com has.lan;
|
||||||
|
return 301 https://has.casa.limbosolutions.com$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
server {
|
||||||
|
|
||||||
|
listen 443 ssl;
|
||||||
|
ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
|
||||||
|
ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_pass http://homeassistant-app:80;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection $connection_upgrade;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
32
services/nginx/docker/nginx.conf.d/lms.lan.conf
Normal file
32
services/nginx/docker/nginx.conf.d/lms.lan.conf
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
server {
|
||||||
|
server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com;
|
||||||
|
listen 443 ssl;
|
||||||
|
ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
|
||||||
|
ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
set $upstream lms-lms-1;
|
||||||
|
#docker default resolver
|
||||||
|
resolver 127.0.0.11 ipv6=off;
|
||||||
|
proxy_pass http://$upstream:9002;
|
||||||
|
proxy_set_header Host $http_host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Forwarded-Protocol $scheme;
|
||||||
|
proxy_set_header X-Url-Scheme $scheme;
|
||||||
|
|
||||||
|
# WebSocket support
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "upgrade";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com lms.lan music.lan;
|
||||||
|
return 301 https://lms.casa.limbosolutions.com$request_uri;
|
||||||
|
}
|
||||||
22
services/nginx/docker/nginx.conf.d/zigbee2mqtt.lan.conf
Normal file
22
services/nginx/docker/nginx.conf.d/zigbee2mqtt.lan.conf
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
proxy_buffering off;
|
||||||
|
server_name zigbee2mqtt.lan;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_pass http://zigbee2mqtt:8080/;
|
||||||
|
proxy_set_header Host $http_host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Forwarded-Protocol $scheme;
|
||||||
|
proxy_set_header X-Url-Scheme $scheme;
|
||||||
|
|
||||||
|
# WebSocket support
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "upgrade";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
# node-red
|
|
||||||
|
|
||||||
**Deploy app:**
|
|
||||||
|
|
||||||
```bash {cwd=../../}
|
|
||||||
./services/node-red/ops-scripts/apply-app.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
**Deploy Infra:**
|
|
||||||
|
|
||||||
```bash {cwd=../../}
|
|
||||||
./services/node-red/ops-scripts/apply-infra.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
Create password to add to node-red settings file.
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
#npm install bcryptjs
|
|
||||||
node -e "console.log(require('bcryptjs').hashSync(process.argv[1], 8));" YOUR-PASSWORD
|
|
||||||
```
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
module.exports = {
|
|
||||||
adminAuth: {
|
|
||||||
type: "credentials",
|
|
||||||
users: [{
|
|
||||||
username: "?????",
|
|
||||||
password: "??????",
|
|
||||||
permissions: "*"
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
1
services/node-red/deploy/app/.gitignore
vendored
1
services/node-red/deploy/app/.gitignore
vendored
@@ -1 +0,0 @@
|
|||||||
node-red-settings.js
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: node-red
|
|
||||||
namespace: node-red
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: node-red
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: node-red
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: node-red
|
|
||||||
image: nodered/node-red:latest
|
|
||||||
imagePullPolicy: Always
|
|
||||||
ports:
|
|
||||||
- containerPort: 1880
|
|
||||||
volumeMounts:
|
|
||||||
- name: node-red-data
|
|
||||||
mountPath: /data
|
|
||||||
- name: node-red-settings
|
|
||||||
mountPath: /data/settings.js
|
|
||||||
subPath: settings.js
|
|
||||||
- name: limbomox-ssh
|
|
||||||
mountPath: /.keys/limbomox-ssh-node-red/id_ed25519
|
|
||||||
subPath: id-ed25519
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: "128Mi"
|
|
||||||
cpu: "100m"
|
|
||||||
limits:
|
|
||||||
memory: "256Mi"
|
|
||||||
cpu: "500m"
|
|
||||||
volumes:
|
|
||||||
- name: node-red-data
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: node-red
|
|
||||||
- name: node-red-settings
|
|
||||||
secret:
|
|
||||||
secretName: node-red-settings
|
|
||||||
- name: limbomox-ssh
|
|
||||||
secret:
|
|
||||||
secretName: limbomox-ssh
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- pvc.yaml
|
|
||||||
- deployment.yaml
|
|
||||||
- service.yaml
|
|
||||||
generatorOptions:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
secretGenerator:
|
|
||||||
|
|
||||||
- name: limbomox-ssh
|
|
||||||
files:
|
|
||||||
- id-ed25519=./.env.d/limbomox.node-red.id_ed25519
|
|
||||||
|
|
||||||
- name: node-red-settings
|
|
||||||
files:
|
|
||||||
- settings.js=./.env.d/node-red-settings.js
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
metadata:
|
|
||||||
name: node-red
|
|
||||||
namespace: node-red
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 1Gi
|
|
||||||
storageClassName: local-path
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: node-red
|
|
||||||
namespace: node-red
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
app: node-red
|
|
||||||
ports:
|
|
||||||
- protocol: TCP
|
|
||||||
port: 1880
|
|
||||||
targetPort: 1880
|
|
||||||
type: ClusterIP
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
apiVersion: traefik.io/v1alpha1
|
|
||||||
kind: IngressRoute
|
|
||||||
metadata:
|
|
||||||
name: node-red
|
|
||||||
namespace: node-red
|
|
||||||
spec:
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
routes:
|
|
||||||
- match: Host(`node-red.casa.limbosolutions.com`)
|
|
||||||
kind: Rule
|
|
||||||
services:
|
|
||||||
- name: node-red
|
|
||||||
port: 1880
|
|
||||||
tls: {}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
- ingress.yaml
|
|
||||||
generatorOptions:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: node-red
|
|
||||||
labels:
|
|
||||||
name: node-red
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
echo "Executing app deploy."
|
|
||||||
|
|
||||||
kubectl kustomize ./services/node-red/deploy/app | kubectl apply -f -
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
echo "Executing infra deploy."
|
|
||||||
|
|
||||||
kubectl kustomize ./services/node-red/deploy/infra | kubectl apply -f -
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: Role
|
|
||||||
metadata:
|
|
||||||
namespace: wyoming
|
|
||||||
name: ci-cd
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources: ["pods", "services", "secrets", "configmaps", "persistentvolumeclaims", "endpoints"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["apps"]
|
|
||||||
resources: ["deployments", "statefulsets"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["networking.k8s.io"]
|
|
||||||
resources: ["ingresses"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
- apiGroups: ["traefik.io"]
|
|
||||||
resources: ["ingressroutes"]
|
|
||||||
verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: RoleBinding
|
|
||||||
metadata:
|
|
||||||
name: ci-cd
|
|
||||||
namespace: wyoming
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: casa-ci-cd
|
|
||||||
namespace: home-assistant
|
|
||||||
roleRef:
|
|
||||||
kind: Role
|
|
||||||
name: ci-cd
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: wyoming
|
|
||||||
@@ -1,148 +0,0 @@
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
---
|
|
||||||
# dns: whisper.wyoming.svc.cluster.local
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: whisper
|
|
||||||
namespace: wyoming
|
|
||||||
labels:
|
|
||||||
app: wyoming-whisper
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
app: wyoming-whisper
|
|
||||||
ports:
|
|
||||||
- name: tcp-whisper
|
|
||||||
protocol: TCP
|
|
||||||
port: 10300
|
|
||||||
targetPort: 10300
|
|
||||||
type: ClusterIP
|
|
||||||
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
namespace: wyoming
|
|
||||||
name: wyoming-whisper
|
|
||||||
labels:
|
|
||||||
app: wyoming-whisper
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: wyoming-whisper
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: wyoming-whisper
|
|
||||||
spec:
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
containers:
|
|
||||||
- name: wyoming-whisper
|
|
||||||
image: rhasspy/wyoming-whisper
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /data
|
|
||||||
name: data
|
|
||||||
args:
|
|
||||||
- --model
|
|
||||||
- small-int8
|
|
||||||
- --language
|
|
||||||
- pt
|
|
||||||
- --beam-size
|
|
||||||
- "4"
|
|
||||||
- --debug
|
|
||||||
ports:
|
|
||||||
- name: tcp-whisper
|
|
||||||
containerPort: 10300
|
|
||||||
volumes:
|
|
||||||
- name: data
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: wyoming-whisper
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
metadata:
|
|
||||||
name: wyoming-whisper
|
|
||||||
namespace: wyoming
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 8Gi
|
|
||||||
---
|
|
||||||
|
|
||||||
# dns: piper.wyoming.svc.cluster.local
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: piper
|
|
||||||
namespace: wyoming
|
|
||||||
labels:
|
|
||||||
app: wyoming-piper
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
app: wyoming-piper
|
|
||||||
ports:
|
|
||||||
- name: tcp-piper
|
|
||||||
protocol: TCP
|
|
||||||
port: 10200
|
|
||||||
targetPort: 10200
|
|
||||||
type: ClusterIP
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
namespace: wyoming
|
|
||||||
name: wyoming-piper
|
|
||||||
labels:
|
|
||||||
app: wyoming-piper
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: wyoming-piper
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: wyoming-piper
|
|
||||||
spec:
|
|
||||||
nodeSelector:
|
|
||||||
role: worker-node
|
|
||||||
containers:
|
|
||||||
- name: wyoming-piper
|
|
||||||
image: rhasspy/wyoming-piper
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /data
|
|
||||||
name: data
|
|
||||||
args:
|
|
||||||
- --voice
|
|
||||||
- en-gb-southern_english_female-low
|
|
||||||
ports:
|
|
||||||
- containerPort: 10200
|
|
||||||
volumes:
|
|
||||||
- name: data
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: wyoming-piper
|
|
||||||
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
metadata:
|
|
||||||
name: wyoming-piper
|
|
||||||
namespace: wyoming
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 2Gi
|
|
||||||
25
services/wyoming/docker-compose.yaml
Normal file
25
services/wyoming/docker-compose.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
services:
|
||||||
|
# whisper:
|
||||||
|
# image: linuxserver/faster-whisper
|
||||||
|
# restart: unless-stopped
|
||||||
|
# volumes:
|
||||||
|
# - whisper_data:/data
|
||||||
|
# ports:
|
||||||
|
# - 10300:10300
|
||||||
|
# environment:
|
||||||
|
# - TZ=Europe/Lisbon
|
||||||
|
# - WHISPER_MODEL=small-int8
|
||||||
|
# - WHISPER_LANG=pt
|
||||||
|
piper:
|
||||||
|
image: rhasspy/wyoming-piper
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- piper_data:/data
|
||||||
|
ports:
|
||||||
|
- 10200:10200
|
||||||
|
environment:
|
||||||
|
- TZ=Europe/Lisbon
|
||||||
|
command: [ "--voice", "en-gb-southern_english_female-low" ]
|
||||||
|
volumes:
|
||||||
|
whisper_data:
|
||||||
|
piper_data:
|
||||||
55
services/wyoming/whisper.kubernetes-deployment.yaml
Normal file
55
services/wyoming/whisper.kubernetes-deployment.yaml
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
# currently hosted on chimera k3s cluster
|
||||||
|
# Add nodeSelector to schedule the pod on specific nodes
|
||||||
|
# Example: only schedule on nodes labeled with kubernetes.io/hostname=chimera
|
||||||
|
# Adjust the key/value as needed for your cluster
|
||||||
|
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
namespace: casa-services
|
||||||
|
name: wyoming-whisper
|
||||||
|
labels:
|
||||||
|
app: wyoming-whisper
|
||||||
|
spec:
|
||||||
|
replicas: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: wyoming-whisper
|
||||||
|
|
||||||
|
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: wyoming-whisper
|
||||||
|
spec:
|
||||||
|
#nodeSelector:
|
||||||
|
#kubernetes.io/hostname: chimera-flashgordon
|
||||||
|
tolerations:
|
||||||
|
- key: "dedicated"
|
||||||
|
value: "reserved"
|
||||||
|
effect: "NoSchedule"
|
||||||
|
|
||||||
|
containers:
|
||||||
|
- name: wyoming-whisper
|
||||||
|
image: linuxserver/faster-whisper
|
||||||
|
ports:
|
||||||
|
- containerPort: 10300
|
||||||
|
hostPort: 10300
|
||||||
|
env:
|
||||||
|
- name: WHISPER_MODEL
|
||||||
|
value: small-int8 # turbo
|
||||||
|
- name: WHISPER_LANG
|
||||||
|
value: pt
|
||||||
|
- name: OMP_NUM_THREADS
|
||||||
|
value: "4"
|
||||||
|
- name: BEAM
|
||||||
|
value: "4"
|
||||||
|
|
||||||
|
# args: ["--threads", "8"]
|
||||||
|
# resources:
|
||||||
|
# requests:
|
||||||
|
# cpu: "500m"
|
||||||
|
# memory: "1Gi"
|
||||||
|
# limits:
|
||||||
|
# cpu: "1"
|
||||||
|
# memory: "2Gi"
|
||||||
@@ -1,66 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
metadata:
|
|
||||||
name: zigbee2mqtt-data
|
|
||||||
namespace: mqtt
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 1Gi
|
|
||||||
storageClassName: local-path
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: StatefulSet
|
|
||||||
metadata:
|
|
||||||
name: zigbee2mqtt
|
|
||||||
namespace: mqtt
|
|
||||||
labels:
|
|
||||||
app: zigbee2mqtt
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: zigbee2mqtt
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: zigbee2mqtt
|
|
||||||
spec:
|
|
||||||
nodeName: casa # force deploy to master node cluster
|
|
||||||
containers:
|
|
||||||
- name: zigbee2mqtt
|
|
||||||
securityContext:
|
|
||||||
privileged: true
|
|
||||||
image: koenkk/zigbee2mqtt
|
|
||||||
volumeMounts:
|
|
||||||
- name: zigbee2mqtt-data
|
|
||||||
mountPath: /app/data
|
|
||||||
- name: usb-device
|
|
||||||
mountPath: /dev/ttyUSB0
|
|
||||||
- name: run-udev
|
|
||||||
mountPath: run/udev
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: "50Mi"
|
|
||||||
cpu: "100m"
|
|
||||||
limits:
|
|
||||||
memory: "100Mi"
|
|
||||||
cpu: "200m"
|
|
||||||
volumes:
|
|
||||||
- name: usb-device
|
|
||||||
hostPath:
|
|
||||||
path: /dev/ttyUSB0
|
|
||||||
type: CharDevice
|
|
||||||
- name: run-udev
|
|
||||||
hostPath:
|
|
||||||
path: /run/udev
|
|
||||||
- name: zigbee2mqtt-data
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName : zigbee2mqtt-data
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
28
services/zigbee2mqtt/docker-compose.yaml
Normal file
28
services/zigbee2mqtt/docker-compose.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
version: '3.8'
|
||||||
|
services:
|
||||||
|
zigbee2mqtt:
|
||||||
|
container_name: zigbee2mqtt
|
||||||
|
image: koenkk/zigbee2mqtt
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- data:/app/data
|
||||||
|
- /run/udev:/run/udev:ro
|
||||||
|
#ports:
|
||||||
|
# Frontend port
|
||||||
|
#- 8085:8080
|
||||||
|
environment:
|
||||||
|
- TZ=Europe/Lisbon
|
||||||
|
devices:
|
||||||
|
# Make sure this matched your adapter location
|
||||||
|
- /dev/ttyUSB0:/dev/ttyUSB0
|
||||||
|
networks:
|
||||||
|
zigbee2mqtt:
|
||||||
|
reverseproxy_public:
|
||||||
|
volumes:
|
||||||
|
data:
|
||||||
|
networks:
|
||||||
|
zigbee2mqtt:
|
||||||
|
reverseproxy_public:
|
||||||
|
external: true
|
||||||
|
|
||||||
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
# storage-limbosolutions-com
|
|
||||||
|
|
||||||
<https://github.com/seaweedfs/seaweedfs-csi-driver>
|
|
||||||
|
|
||||||
```bash {cwd=../}
|
|
||||||
./storage-limbosolutions-com/ops-scripts/apply.sh
|
|
||||||
```
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
|
|
||||||
seaweedfsFiler: "????:????" # # host and port of your SeaweedFs filer replace helm --set
|
|
||||||
storageClassName: storage-limbosolutions-com
|
|
||||||
storageClassVolumeBindingMode: Immediate
|
|
||||||
isDefaultStorageClass: false
|
|
||||||
tlsSecret: ""
|
|
||||||
imagePullPolicy: "Always" # "Always"
|
|
||||||
driverName: storage-limbosolutions-com-seaweedfs-csi
|
|
||||||
|
|
||||||
controller:
|
|
||||||
# nodeSelector:
|
|
||||||
# role: worker-node
|
|
||||||
resources: {}
|
|
||||||
livenessProbe:
|
|
||||||
failureThreshold:
|
|
||||||
initialDelaySeconds: 10
|
|
||||||
timeoutSeconds: 3
|
|
||||||
periodSeconds: 60
|
|
||||||
|
|
||||||
# DataLocality (inspired by Longhorn) allows instructing the storage-driver which volume-locations will be used or preferred in Pods to read & write.
|
|
||||||
# e.g. Allows Pods to write preferrably to its local dataCenter volume-servers
|
|
||||||
# Requires Volume-Servers to be correctly labelled and matching Topology-Info to be passed into seaweedfs-csi-driver node
|
|
||||||
# Example-Value: "write_preferlocaldc"
|
|
||||||
dataLocality: "none"
|
|
||||||
|
|
||||||
mountService:
|
|
||||||
# Must be enabled for the CSI driver node component to mount volumes
|
|
||||||
enabled: true
|
|
||||||
image: chrislusf/seaweedfs-mount:latest
|
|
||||||
|
|
||||||
node:
|
|
||||||
# Deploy node daemonset
|
|
||||||
enabled: true
|
|
||||||
# When seaweedfs-csi-driver-node pod on node is recreated, all pods on same node using seaweed-csi PV will stop working.
|
|
||||||
# For safe update set updateStrategy.type: OnDelete and manually move pods who use seaweed-csi PV, then delete seaweedfs-csi-driver-node damonset pod
|
|
||||||
updateStrategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxUnavailable: 25%
|
|
||||||
affinity: {}
|
|
||||||
# dont set tolerations so is not installed on control-plane/master node
|
|
||||||
# tolerations: #dedicated=reserved:NoSchedule
|
|
||||||
# - key: "dedicated"
|
|
||||||
# operator: "Equal"
|
|
||||||
# value: "reserved"
|
|
||||||
# effect: "NoSchedule"
|
|
||||||
livenessProbe:
|
|
||||||
failureThreshold:
|
|
||||||
initialDelaySeconds: 10
|
|
||||||
timeoutSeconds: 3
|
|
||||||
periodSeconds: 60
|
|
||||||
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
generatorOptions:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: storage-limbosolutions-com
|
|
||||||
labels:
|
|
||||||
name: storage-limbosolutions-com
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
kubectl kustomize ./storage-limbosolutions-com/deploy/infra | kubectl apply -f -
|
|
||||||
|
|
||||||
helm repo add seaweedfs-csi-driver https://seaweedfs.github.io/seaweedfs-csi-driver/helm
|
|
||||||
helm repo update seaweedfs-csi-driver
|
|
||||||
|
|
||||||
helm upgrade --install \
|
|
||||||
--set seaweedfsFiler=192.168.14.32:7888 \
|
|
||||||
--values=./storage-limbosolutions-com/deploy/helm/values.yaml \
|
|
||||||
storage-limbosolutions-com-seaweedfs-csi-driver seaweedfs-csi-driver/seaweedfs-csi-driver --namespace storage-limbosolutions-com
|
|
||||||
Reference in New Issue
Block a user