Compare commits: fix-links...fae1e776c9 (77 commits)
Commits included in this compare (author and date metadata not preserved in this view):

fae1e776c9, 24a0a03a57, 41c33b23af, e20fd0b80f, bb8e4e59ae, a5136e07a2, 1910b1da3a, d1093834fb, 07e34529a5, fa9a8e67e9, e5aa21e3d5, 2ee7121c08, 042fc295de, 08f6d60046, a70a5cf420, 1983102d01, 56bd7bca4a, c3a3db9a5b, 0b3c8564db, 66e93a72d0, 620106ef60, 88f9289665, a426a35f86, 8325fff602, 94ea5525f5, 6ac89218fd, 79cb5c6fd5, 2be358ff98, 34a918ec8c, d7b2e561cc, c27fa22928, 4fd23a5d8c, 36b82c9038, 3f5d52a5fa, 933e774c7e, 11cea9b6fc, 9f9a390765, 7eb254da2f, e7b1181dab, 129e1836d4, 4ea6e35522, 6d9a1fd947, 8d0f2c181a, d7c41dd3cb, 260ae62e59, bccf153db0, 66e09df419, edde01efb5, ab7f0b2e97, fba35ce3bc, 631ff376e5, ca14c75897, 86ba316bdc, a739a72f4a, 5dda1e9135, 9455ef9d80, 1193a81fb0, f5a368e3f8, b2cae5fb94, 2a5f3c14ee, d793e8781c, 7a2ac916c9, 56cd2c69af, a7d600c84d, f9bc6ae967, b4768c784f, 79e4d6cd47, 7df12159ea, dcff1e3c7f, baff515fd5, 54d5fc1c92, 24edd39cf3, 14b46c5ced, 776c2b3706, abbad37871, 25802094a5, 39e5d5a855
@@ -1,109 +0,0 @@ (deleted file: an Oh My Zsh `.zshrc`; the stock template's commented-out options are not reproduced here, only the lines that were actually set)

```zsh
export ZSH="$HOME/.oh-my-zsh"

ZSH_THEME="gnzh"

plugins=(git timer web-search ssh kubectl helm colorize zsh-autosuggestions rclone ansible)
ZSH_COLORIZE_TOOL=chroma

source $ZSH/oh-my-zsh.sh

DISABLE_AUTO_UPDATE=true
DISABLE_UPDATE_PROMPT=true

export PATH=$PATH:/home/vscode/lib
```
@@ -1,47 +0,0 @@ (deleted file: dev container configuration)

```jsonc
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu
{
    "name": "casa-dev",
    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    "image": "mcr.microsoft.com/devcontainers/base:jammy",
    "features": {
        "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {},
        "ghcr.io/devcontainers-extra/features/ansible": {},
        "ghcr.io/devcontainers/features/docker-outside-of-docker": {},
    },
    // "features": {},

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],
    // Configure tool-specific properties.
    // "customizations": {},

    "mounts": [
        "source=${localWorkspaceFolder}/.devcontainer/.zshrc,target=/home/vscode/.zshrc,type=bind",
        "source=${localWorkspaceFolder}/../lms,target=/workspaces/lms,type=bind",
        "source=${localWorkspaceFolder}/../homeAssistant,target=/workspaces/homeAssistant,type=bind",
        "source=${localWorkspaceFolder}/../mosquitto,target=/workspaces/mosquitto,type=bind",
        "source=${localWorkspaceFolder}/../kb,target=/workspaces/kb,type=bind",
        "source=${localWorkspaceFolder}/.env.d/kube,target=/home/vscode/.kube,type=bind",
        "source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,consistency=cached",
        "source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached"
    ],
    "postCreateCommand": "bash .devcontainer/scripts/postCreate.sh",
    "customizations": {
        "vscode": {
            "extensions": [
                "ms-kubernetes-tools.vscode-kubernetes-tools",
                "redhat.ansible",
                "mtxr.sqltools-driver-mysql",
                "stateful.runme",
                "yzhang.markdown-all-in-one",
                "davidanson.vscode-markdownlint",
                "eamodio.gitlens",
                "m4ns0ur.base64"
            ],
            "settings": {
                "terminal.integrated.defaultProfile.linux": "zsh"
            }
        }
    }
}
```
@@ -1,7 +0,0 @@ (deleted file: post-create script)

```bash
#setup chroma for zsh colorize
chmod +x /home/vscode/lib/chroma
curl https://rclone.org/install.sh | sudo bash
docker context create casa-prod --description "casa prod context" --docker host=ssh://admin@homesrv01.dev.lan
docker context use casa-prod
```
@@ -1,21 +0,0 @@ (deleted file: Gitea workflow for the sync-certs job)

```yaml
on:
  push:
  schedule:
    - cron: '0 16 * * *' # Every day at 16:00
jobs:
  deploy-to-homesrv01:
    runs-on: "myLimbo-casa-gitea-act-runner"
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      # all certs and key are base64 encoded
      - name: docker compose up
        env:
          KUBERNETES_SERVER: ${{ secrets.KUBERNETES_SERVER }}
          KUBERNETES_CLIENT_CRT_DATA: ${{ secrets.KUBERNETES_CLIENT_CRT_DATA }}
          KUBERNETES_CLIENT_KEY_DATA: ${{ secrets.KUBERNETES_CLIENT_KEY_DATA }}
          KUBERNETES_CRT_AUTHORITY_DATA: ${{ secrets.KUBERNETES_CRT_AUTHORITY_DATA }}
        run: |
          docker compose -f ./casa-limbosolutions-com/sync-certs-job/docker-compose.yaml up -d --pull always
```
@@ -1,13 +0,0 @@ (deleted file: Gitea workflow for the wyoming stack)

```yaml
on:
  push:
  schedule:
    - cron: '0 5 * * SUN' # Every Sunday at 05:00
jobs:
  deploy-to-homesrv01:
    runs-on: "myLimbo-casa-gitea-act-runner"
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: docker compose up
        run: docker compose -f ./services/wyoming/docker-compose.yaml up -d --pull always
```
`.gitea/workflows/host.yml` (new file, 37 lines)

@@ -0,0 +1,37 @@

```yaml
name: deploy host
on:
  push:
    # paths:
    #   - "ansible/**"
    #   - ".gitea/workflows/**host**.yml"
jobs:
  Deploy:
    runs-on: "homesrv-deploy"
    #      - data
    #      - /src/*.json
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      # - name: ls
      #   run: ls -lah ${GITHUB_WORKSPACE}

      # - name: Run Ansible Playbook
      #   run: |
      #     docker run --rm --volumes-from ${{ env.JOB_CONTAINER_NAME }} ubuntu:latest ls -lah ${GITHUB_WORKSPACE}; code-server
      - name: Run Ansible Playbook
        run: |
          docker run --rm \
            -e ANSIBLE_PLAYBOOK_PRIVATE_KEY="${{ secrets.HOST_ANSIBLE_PRIVATE_KEY }}" \
            -e ANSIBLE_PLAYBOOK_REMOTE_USER="${{ secrets.HOST_ANSIBLE_REMOTE_USER }}" \
            -e ANSIBLE_PLAYBOOK_INVENTORY="${{ secrets.HOST_ANSIBLE_REMOTE_ADDRESS }}" \
            -e ANSIBLE_PLAYBOOK_WORKSPACE_PATH=${GITHUB_WORKSPACE}/ansible \
            --volumes-from ${{ env.JOB_CONTAINER_NAME }} \
            --entrypoint "/bin/bash" \
            git.limbosolutions.com/kb/ansible-playbook:dev -c "ls -lah /{GITHUB_WORKSPACE}/ansible; chmod -R a+rwx {GITHUB_WORKSPACE}/ansible; python3 {GITHUB_WORKSPACE}/ansible"

      # --entrypoint "/bin/bash" \
      #git.limbosolutions.com/kb/ansible-playbook:dev -c "ls -lah ${GITHUB_WORKSPACE}"
```
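The path filters in this workflow are committed commented out, so every push currently triggers the deploy job. For reference only, enabling the commented filters from the file above would make the trigger block read:

```yaml
on:
  push:
    paths:
      - "ansible/**"
      - ".gitea/workflows/**host**.yml"
```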
`.gitea/workflows/services.nginx.yml` (new file, 13 lines)

@@ -0,0 +1,13 @@

```yaml
name: Portainer stack nginx
on:
  push:
    paths:
      - "services/nginx/**"
      - ".gitea/workflows/**nginx**.yml"
jobs:
  deploy:
    runs-on: "homesrv-deploy"
    steps:
      - name: Portainer stack nginx webhook
        run: |
          curl -X POST "${{secrets.PORTAINER_NGINX_WEBHOOK_URL}}"
```
`.gitea/workflows/services.wyoming.yml` (new file, 13 lines)

@@ -0,0 +1,13 @@

```yaml
name: Portainer stack wyoming
on:
  push:
    paths:
      - "services/wyoming/**"
      - ".gitea/workflows/**wyoming**.yml"
jobs:
  deploy:
    runs-on: "homesrv-deploy"
    steps:
      - name: Portainer stack wyoming webhook
        run: |
          curl -X POST "${{secrets.PORTAINER_WYOMING_WEBHOOK_URL}}"
```
`.gitea/workflows/services.zigbee2mqtt.yml` (new file, 13 lines)

@@ -0,0 +1,13 @@

```yaml
name: Portainer stack zigbee2mqtt
on:
  push:
    paths:
      - "services/zigbee2mqtt/**"
      - ".gitea/workflows/**zigbee2mqtt**.yml"
jobs:
  deploy:
    runs-on: "homesrv-deploy"
    steps:
      - name: Portainer stack zigbee2mqtt webhook
        run: |
          curl -X POST "${{secrets.PORTAINER_ZIGBEE2MQTT_WEBHOOK_URL}}"
```
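The three Portainer workflows above differ only in the watched path and the webhook secret; each one simply POSTs to the stack's redeploy webhook. Triggered by hand it would look roughly like this (the URL below is a placeholder, the real one is stored as a Gitea secret and comes from the stack's settings in Portainer):

```bash
# illustrative only: substitute the real webhook URL from the Portainer stack settings
curl -X POST "https://portainer.example.lan/api/stacks/webhooks/<webhook-id>"
```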
`.gitignore` (vendored, 3 lines removed)
@@ -8,6 +8,3 @@ services/zigbee2mqtt/volumes/*
```diff
 .vscode/settings.json
 gitignore/*
 **.local
-.secrets
-ansible/inventory.yml
-.env.d/*
```
`.vscode/casa.full-stack.code-workspace` (vendored, deleted file)

@@ -1,7 +0,0 @@

```jsonc
{
    "folders": [
        {
            "path": "../../homeAssistant"
        }
    ]
}
```
`README.md` (213 lines changed)

````diff
@@ -1,87 +1,130 @@
-# casa
+# homesrv01.dev.lan

-Welcome to my home server repository, where Home Assistant and other services are hosted.
+Welcome to homesrv01.dev.lan git page.
-This repository is dedicated to documenting and maintaining the server configuration and workflows.

-The primary goal is to establish a Docker-based server capable of isolating services and communications related to home automation at the network level.
+This repository serves to document and maintain the server where the home assistant runs.
-The server operates within its own VLAN (`homesrv`) but requires controlled communication with other VLANs, such as `IOT Vlan`
+
+The idea was to create a server with docker where it would be possible to isolate all servers, services and communications related to home automation at network level.
+
+The server itself is on its own vlan (Vlan: homesrv) but requires communication with the Vlans:
+
+- IOT
+
+Using [code-server docker container](#code-server) for Development / Maintenance.
+
+All host configuration are executed using [ansible](#ansible-roles).

 <!-- omit in toc -->
 ## Table of Contents

 - [Services](#services)
   - [myInfra stack](#myinfra-stack)
-  - [docker](#docker)
+  - [SSH](#ssh)
-    - [rclone plugin](#rclone-plugin)
   - [nginx](#nginx)
+  - [code-server](#code-server)
   - [Home Assistant](#home-assistant)
   - [Lyrion Music Server (LMS)](#lyrion-music-server-lms)
   - [Mosquitto](#mosquitto)
   - [Wyoming](#wyoming)
   - [Zigbee2mqtt](#zigbee2mqtt)
-- [Host](#host)
+    - [Docker devices](#docker-devices)
-  - [Proxmox - container](#proxmox---container)
+- [Proxmox - lxc container](#proxmox---lxc-container)
-  - [OS](#os)
+- [Operation System](#operation-system)
-  - [logs](#logs)
+- [Packages and Requirements](#packages-and-requirements)
-- [Development, Maintenance and Deployment](#development-maintenance-and-deployment)
+- [Ansible roles](#ansible-roles)
-  - [Docker context](#docker-context)
+  - [myInfra.dev.homesrv01.core](#myinfradevhomesrv01core)
+  - [myInfra.dev.homesrv01](#myinfradevhomesrv01)
+- [fstab](#fstab)

 ## Services

 ### myInfra stack

-docker, promtail and telegraf configuration [maintained on myInfra repo](/:root/marcio.fernandes/myInfrastructure).
+docker, Portainer, promtail and telegraf [maintained on myInfra repo](/marcio.fernandes&myInfra).

-### docker
+### SSH

-#### rclone plugin
+Deployed and maintained by ansible role [myInfra.dev.homesrv1](#ansible-roles).

-[https://rclone.org/docker/](https://rclone.org/docker/)

-```bash
-# execute on server
-sudo apt-get -y install fuse
-docker plugin install rclone/docker-volume-rclone:amd64 args="-v" --alias rclone --grant-all-permissions
-docker plugin list
-```

-if error when enabling plugin.
-*"rclone.sock: connect: no such file or directory"*
-remove existing cache.

-```bash
-rm -r /var/lib/docker-plugins/rclone/cache
-mkdir -p /var/lib/docker-plugins/rclone/cache
-```

-[ansible role for plugin configuration](./rclone.docker-plugin.playbook.yaml)

 ### nginx

-[Docker Compose](./services/nginx/docker-compose.yaml)
+Using portainer stack (stack name: nginx) connected to this repo. [Docker Compose](./services/nginx/docker-compose.yaml)

-All sites configurations set during docker build.
+All configuration is set during docker build.

+### code-server
+
+Using [custom code-server docker image](/kb/code-server/), includes:
+
+- docker-cli
+- ansible
+- ansible-lint
+
+For more flexibility on bind mount, stack is maintained directly on portainer (stack name: code-server).
+
+docker-compose.yml example.
+
+```yaml
+version: '3'
+services:
+  code-server:
+    container_name: code
+    image: git.limbosolutions.com/kb/code-server:latest
+    environment:
+      - PUID=0
+      - PGID=0
+      - TZ=Europe/London
+      - PASSWORD=${CODESERVER_PASSWORD}
+      - DEFAULT_WORKSPACE=/config/workspace
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - code-server_config:/config
+      - code-server_root:/root
+      - has_config:/config/workspace/has/volumes/config
+      - /:/mnt/hostfs
+      #- ./../nginx/volumes/nginx_conf:/config/workspace/host/repos/homesrv01.dev.lan/services/nginx/volumes/nginx_conf
+      #- mosquitto_config:/config/workspace/host/repos/homesrv01.dev.lan/services/mosquitto/volumes/config
+      #- mosquitto_data:/config/workspace/host/repos/homesrv01.dev.lan/services/mosquitto/volumes/data
+      #- zigbee2mqtt_data:/config/workspace/host/repos/homesrv01.dev.lan/services/zigbee2mqtt/volumes/data
+      - hostfs:/mnt/hostfs
+    networks:
+      - code
+      - reverseproxy_public
+    ports:
+      - 8444:8443
+    restart: unless-stopped
+volumes:
+  # zigbee2mqtt_data:
+  #   name: zigbee2mqtt_data
+  #   external: true
+  code-server_config:
+  code-server_root:
+  has_config:
+    name: has_app_config
+    external: true
+  #mosquitto_config:
+  #  external: true
+  #mosquitto_data:
+  #  external: true
+networks:
+  code:
+  reverseproxy_public:
+    external: true
+```

 ### Home Assistant

-[Git Repo](/:root/marcio.fernandes/homeAssistant)
+[Git Repo](https://git.limbosolutions.com/marcio.fernandes/homeAssistant)

 ### Lyrion Music Server (LMS)

-For instructions on setting up the Lyrion Music Server Docker container, refer to the [LMS Git Repository](/:root/marcio.fernandes/lms).
+[Check git repo](/marcio.fernandes/lms) for more information how to setup Lyrion Music Server docker container. Or [git repo](/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server) for more information about home assistant integration.

-For information on integrating Lyrion Music Server with Home Assistant, visit the [Home Assistant Git Repository](/:root/marcio.fernandes/homeassistant#squeezebox-lyrion-music-server).
+Requires [music docker volume](#myinfradevhomesrv01).

-Using [Docker Rclone plugin](https://rclone.org/docker/) for accessing the bucket where music is stored. Configuration is managed via [Ansible playbook](./rclone.docker-plugin.playbook.yml).

-```sh
-#configure access to s3 bucket
-ansible-playbook ./rclone.docker-plugin.playbook.yml
-```

 ### Mosquitto

-[Git Repo](/:root/marcio.fernandes/mosquitto)
+[Git Repo](https://git.limbosolutions.com/marcio.fernandes/mosquitto)

 ### Wyoming

@@ -97,13 +140,11 @@ Used in Rhasspy and Home Assistant for communication with voice services.

 This is an open standard of the Open Home Foundation.

-For more information about home assistant integration [check home assistant repo](/:root/marcio.fernandes/homeassistant#wyoming).
+For more information about home assistant integration [check home assistant repo](/marcio.fernandes/homeassistant#wyoming).

-[Docker compose file](./services/wyoming/docker-compose.yaml).
+Currently using portainer stack (name: wyoming) with git reference to this repo. [docker compose file](./services/wyoming/docker-compose.yaml).

-Continuous deploy [gitea action](.gitea/workflows/deploy-wyoming.yml).
+Gitea [Continuous deploy action](./.gitea/workflows/services.wyoming.yml)

-Because of performance wyoming whisper is currently hosted on chimera kubernetes cluster [deployment](./services/wyoming/whisper.kubernetes-deployment.yaml)

 Links:

@@ -116,6 +157,8 @@ Links:

 Zigbee to MQTT bridge, get rid of your proprietary Zigbee bridges

+Currently using portainer stack (name: zigbee2mqtt) with git reference to this repo [docker compose](./services/zigbee2mqtt/docker-compose.yaml).

 SONOFF Universal Zigbee 3.0 USB Dongle Plus attached on [proxmox host](#proxmox---lxc-container).

 Patch security on [proxmox host](#proxmox---lxc-container).

@@ -127,17 +170,24 @@ chown 100000:100020 /dev/ttyUSB0
 chown 100000:100020 /dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_c0e8eeb4b38ded118e7c06f6b86ce6f8-if00-port0
 ```

-[docker compose](./services/zigbee2mqtt/docker-compose.yaml)
+#### Docker devices

+```yaml
+....
+devices:
+  # Make sure this matched your adapter location
+  - /dev/ttyUSB0:/dev/ttyUSB0
+....
+```

 Links

 - [https://www.zigbee2mqtt.io/](https://www.zigbee2mqtt.io/)
-- [Home assistant integration](/:root/marcio.fernandes/homeassistant#Zigbee2mqtt)
+- [Home assistant integration](/marcio.fernandes/homeassistant#Zigbee2mqtt)
 - [Continuos Deploy - git action](./.gitea/workflows/services.zigbee2mqtt.yml)

-## Host
+## Proxmox - lxc container

-### Proxmox - container

 Currently hosted on a proxmox ubuntu container.

@@ -197,7 +247,7 @@ ls -l /dev/ttyUSB0
 #lxc.mount.entry: /dev/ttyUSB0 dev/ttyUSB0 none bind,optional,create=file
 ```

-### OS
+## Operation System

 ```bash
 # lsb_release -a

@@ -210,20 +260,47 @@ Codename: noble
 6.8.4-3-pve
 ```

-### logs
+## Packages and Requirements

 ```bash
+apt install ansible
+```
+
+## Ansible roles
+
+### myInfra.dev.homesrv01.core
+
+- ssh server
+- admin user
+- git package
+- curl package
+
+```bash
+#execute local
+sudo ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 ansible/core.yml
+```
+
+### myInfra.dev.homesrv01
+
+[Requires myInfra stack](#myinfra-stack).
+
+- daily auto update script
+- music docker volume. [requires sshfs mount to media on nas](#fstab)
+
+```bash
+#execute local
+sudo ansible-playbook --connection=local --inventory 127.0.0.1, --limit 127.0.0.1 ansible/site.yml
+```
+
+``` bash
 # check auto update scripts logs
 journalctl -r -t auto-update
 ```

-## Development, Maintenance and Deployment
+## fstab

-Using visual studio code, docker, ansible and gitea actions.

-### Docker context

 ```bash
-# create context to homesrv01 docker on development devices
+# /etc/fstab
-docker context create homesrv01 --docker host=ssh://admin@homesrv01.dev.lan
+# used by docker volume music
+sshfs#media@nas.lan:/home/media /mnt/media@sshfs:nas.lan fuse defaults,_netdev,allow_other,follow_symlinks 0 0
 ```
````
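The new "fstab" section of the README shows only the mount line itself. Applying and checking it on the host would look roughly like the sketch below; it assumes key-based SSH access from the host's root user to `media@nas.lan` is already configured, which the diff does not show:

```bash
sudo mount -a                       # mount everything listed in /etc/fstab, including the new sshfs entry
mount | grep nas.lan                # confirm the share is actually mounted
ls /mnt/media@sshfs:nas.lan/music   # the path that the "music" docker volume binds to
```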
@@ -1,2 +0,0 @@ (deleted file: Ansible configuration)

```ini
[defaults]
inventory = ansible/inventory.yml
```
`ansible/core.yml` (new file, 6 lines)

@@ -0,0 +1,6 @@

```yaml
- name: homesrv01 core playbook
  hosts: all
  roles:
    - myInfra.dev.homesrv01.core
    - myInfra.ssh
```
`ansible/roles/myInfra.dev.homesrv01.core/.gitignore` (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@

```text
# Ansible Tower ignore list

# Ansible runtime and backups
*.original
*.tmp
*.bkp
*.retry
*.*~

# Tower runtime roles
roles/**
!roles/myInfra.dev.homesrv01
!roles/requirements.yml

# Try tyo avoid any plain-text passwords
*pwd*
*pass*
*password*
*.txt

# Exclude all binaries
*.bin
*.jar
*.tar
*.zip
*.gzip
*.tgz
```
`ansible/roles/myInfra.dev.homesrv01.core/README.md` (new empty file)
`ansible/roles/myInfra.dev.homesrv01.core/meta/main.yml` (new file, 60 lines). The file is the stock `ansible-galaxy init` role metadata skeleton left unfilled (placeholder `galaxy_info` values, `min_ansible_version: 2.1`, `galaxy_tags: []`), followed by this commented-out dependencies block:

```yaml
# dependencies:
#   - role: myInfra.ssh
#   - role: myInfra.docker.portainer
#   - role: myInfra.docker.promtail
#   - role: myInfra.docker.telegraf
#   - role: myInfra.dev.homesrv01.core
```
`ansible/roles/myInfra.dev.homesrv01.core/tasks/main.yml` (new file, 20 lines)

@@ -0,0 +1,20 @@

```yaml
---

- name: admin user - setup
  become: true
  ansible.builtin.user:
    name: admin
    shell: /bin/bash
    groups: sudo,docker
    append: yes

- name: Install/Update Packages
  become: true
  ansible.builtin.package:
    name:
      - git
      - curl
    state: latest
```
`ansible/roles/myInfra.dev.homesrv01/.gitignore` (vendored, new file, 27 lines). Content identical to `ansible/roles/myInfra.dev.homesrv01.core/.gitignore` above.
`ansible/roles/myInfra.dev.homesrv01/README.md` (new empty file)
`ansible/roles/myInfra.dev.homesrv01/meta/main.yml` (new file, 60 lines). Same stock role metadata skeleton as above; its commented dependencies block lists `myInfra.dev.homesrv01.core`, `myInfra.docker.portainer`, `myInfra.docker.promtail`, `myInfra.docker.telegraf`, and `myInfra.dev.homesrv01.core` again.
`ansible/roles/myInfra.dev.homesrv01/tasks/main.yml` (new file, 12 lines)

@@ -0,0 +1,12 @@

```yaml
---
- name: docker - music volume
  community.docker.docker_volume:
    name: music
    driver: local
    driver_options:
      type: none
      device: /mnt/media@sshfs:nas.lan/music
      o: bind
```
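The role only creates the `music` volume; a container that wants the music has to reference it as an external volume. A minimal compose sketch, with the service name, image tag and mount point chosen for illustration (the LMS container mentioned in the README is the intended consumer):

```yaml
services:
  lms:
    image: lmscommunity/lyrionmusicserver:latest  # illustrative; see the lms repo for the real compose file
    volumes:
      - music:/music:ro   # the bind-backed volume created by the role above

volumes:
  music:
    external: true
```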
`ansible/roles/myInfra.ssh/.gitignore` (vendored, new file, 27 lines). Content identical to `ansible/roles/myInfra.dev.homesrv01.core/.gitignore` above.
`ansible/roles/myInfra.ssh/README.md` (new empty file)
`ansible/roles/myInfra.ssh/handlers/main.yml` (new file, 11 lines)

@@ -0,0 +1,11 @@

```yaml
---
- name: SSH - Restart Service
  become: true
  ansible.builtin.service:
    daemon_reload: true
    name: ssh
    enabled: true
    state: restarted
```
`ansible/roles/myInfra.ssh/meta/main.yml` (new file, 60 lines). Same stock role metadata skeleton as above; its commented dependencies block lists `myInfra.journald`, `myInfra.docker.portainer`, `myInfra.docker.promtail`, `myInfra.docker.telegraf`, and `myInfra.dev.homesrv01.core`.
`ansible/roles/myInfra.ssh/tasks/main.yml` (new file, 30 lines)

@@ -0,0 +1,30 @@

```yaml
---
- name: Install/Update openssh-server
  become: true
  ansible.builtin.package:
    name:
      - openssh-server
    state: latest

- name: SSH - Setup & Config
  copy:
    dest: /etc/ssh/sshd_config.d/10-myLimbo.conf
    content: |
      ###################################################################
      # maintained by myInfra.dev.homesrv01 ansible role
      # https://git.limbosolutions.com/marcio.fernandes/homesrv01.dev.lan
      ####################################################################
      PermitRootLogin no
      PasswordAuthentication no
  notify:
    - SSH - Restart Service
```
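The role drops a snippet into `/etc/ssh/sshd_config.d/` and the handler restarts the service straight away. When changing the snippet it can be worth checking the merged configuration before the restart happens; a quick check on the host:

```bash
# validate the effective sshd configuration (exits non-zero on syntax errors)
sudo sshd -t
```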
`ansible/site.yml` (new file, 8 lines)

@@ -0,0 +1,8 @@

```yaml
- name: homesrv01 playbook
  hosts: all
  roles:
    - myInfra.dev.homesrv01.core
    - myInfra.ssh
    - myInfra.dev.homesrv01
```
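Both playbooks target `hosts: all`, and the inventory itself is not committed (the old `.gitignore` excluded `ansible/inventory.yml`). A minimal inventory for running them against the server over SSH could look like the sketch below; the hostname and user are taken from elsewhere in this repository and may not match the real inventory:

```yaml
all:
  hosts:
    homesrv01:
      ansible_host: homesrv01.dev.lan
      ansible_user: admin
```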
@@ -1,29 +0,0 @@ (deleted file: casa.limbosolutions.com notes)

````markdown
# casa.limbosolutions.com at icarus

Use icarus cluster context to all documentation and scrips on this folder. [Check Instructions](#icarus-cluster---access) for how to setup required user and roles on icurus and client kubeconfig.

## certificates (wildcard)

```bash
kubectl apply -f ./certs.yaml
```

```bash
#check certificates
kubectl get cert -n casa-limbosolutions-com
```

## Icarus cluster - access

On user computer.
*Access to k3s context not required.*

```bash
# create private key
openssl genrsa -out ../../.env.d/kube/casa@icarus-user.key 2048

# create csr
openssl req -new -key ../../.env.d/kube/casa@icarus-user.key -out ../../.env.d/kube/casa@icarus-user.csr -subj "/CN=casa/O=limbosolutions"
```

Follow instructions to [setup user and roles on icarus k3s cluster](./k3s-admin.md), and setup kubectl config [kube config](./k3s-kubctl-config.md).
````
@@ -1,14 +0,0 @@ (deleted file: cert-manager Certificate manifest)

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: casa-limbosolutions-com
  namespace: casa-limbosolutions-com
spec:
  secretName: casa-limbosolutions-com-tls
  dnsNames:
    - "casa.limbosolutions.com"
    - "*.casa.limbosolutions.com"
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-prod
```
@@ -1,95 +0,0 @@ (deleted file: k3s-admin.md)

````markdown
# casa on Icarus - admin

Requires kubernetes admin user access to icarus. All documentation and scripts must be executed on icarus context with an admin account.

Currently using an symbolic on icarus project on my dev device to this file.

## kubernetes Namespace

```bash
# delete namespace
kubectl create namespace casa-limbosolutions-com
```

```bash
# delete namespace
kubectl delete namespace casa-limbosolutions-com
```

## Roles and Bindings

``` yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: casa-limbosolutions-com
  name: casa-limbosolutions-com
rules:
  - apiGroups:
      - ""
      - cert-manager.io # to access deployments certs from cert-manager
      - apps # to access deployments
      - networking.k8s.io # to access ingresses
    resources:
      - pods
      - services
      - secrets
      - certificates
      - deployments
      - configmaps
      - ingresses
      - persistentvolumeclaims
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
      - deletecollection
```

``` yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: casa-limbosolutions-com-rb # Replace with your role binding name
  namespace: casa-limbosolutions-com # Replace with your namespace
subjects:
  - kind: User # or "ServiceAccount" for service accounts
    name: casa # Replace with the username or service account name
    apiGroup: rbac.authorization.k8s.io
    namespace: casa-limbosolutions-com
roleRef:
  kind: ClusterRole
  name: casa-limbosolutions-com # The name of the role you created
  apiGroup: rbac.authorization.k8s.io
```

### kubernetes User

```bash
#Deploy csr to k3s
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: casa-user-csr
spec:
  request: $(cat ../.env.d/.kube/casa@icarus-user.csr | base64 | tr -d '\n')
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF
```

```bash
# Approve csr
kubectl certificate approve casa-user-csr
```

```bash
# Download kubernet user crt
kubectl get csr casa-user-csr -o jsonpath='{.status.certificate}' | base64 --decode > ./.env.d/casa@icarus-user.crt
```
````
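The follow-up kubeconfig setup lives in `k3s-kubctl-config.md`, which is not part of this diff. As a rough sketch only (not the contents of that file, and the cluster entry name `icarus` is assumed), wiring the approved certificate into a kubectl context usually looks like:

```bash
kubectl config set-credentials casa \
  --client-certificate=./.env.d/casa@icarus-user.crt \
  --client-key=../../.env.d/kube/casa@icarus-user.key \
  --embed-certs=true
kubectl config set-context casa@icarus --cluster=icarus --user=casa
kubectl config use-context casa@icarus
```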
@@ -1 +0,0 @@
@@ -1,78 +0,0 @@ (deleted file: sync-certs-job docker-compose.yaml)

```yaml
configs:
  sync-certs-sh:
    content: |
      #!/bin/bash
      ##############################################################################################################################
      # notes:
      # --certificate-authority="$${KUBERNETES_CRT_AUTHORITY}" not working, dont hnow why, so using --insecure-skip-tls-verify
      ##############################################################################################################################

      set -e
      mkdir -p /tmp/.kube

      echo "Trace: Setup kube"

      echo "Trace: Processing KUBERNETES_CRT_AUTHORITY_DATA"
      base64 -d <<< "${KUBERNETES_CRT_AUTHORITY_DATA}" > "$${KUBERNETES_CRT_AUTHORITY}"
      echo "Trace: Processing KUBERNETES_CRT_AUTHORITY_DATA"
      base64 -d <<< "${KUBERNETES_CLIENT_CRT_DATA}" > "$${KUBERNETES_CLIENT_CRT}"
      echo "Trace: Processing KUBERNETES_CLIENT_KEY_DATA"
      base64 -d <<< "${KUBERNETES_CLIENT_KEY_DATA}" > "$${KUBERNETES_CLIENT_KEY}"

      # while true ; do
      #   sleep 5
      # done

      echo "Trace: Fetching secrets"
      CERT_NAMES=$(kubectl get secrets \
        -n casa-limbosolutions-com \
        --server="$${KUBERNETES_SERVER}" \
        --client-key="$${KUBERNETES_CLIENT_KEY}" \
        --client-certificate="$${KUBERNETES_CLIENT_CRT}" \
        --insecure-skip-tls-verify \
        -o json | jq -r '.items[].metadata.name')

      for CERT_NAME in $$CERT_NAMES; do
        echo "Trace: Syncing certificate: $$CERT_NAME"
        kubectl get secret "$$CERT_NAME" \
          -n casa-limbosolutions-com \
          --server="$${KUBERNETES_SERVER}" \
          --client-key="$${KUBERNETES_CLIENT_KEY}" \
          --client-certificate="$${KUBERNETES_CLIENT_CRT}" \
          --insecure-skip-tls-verify \
          -o json | \
        jq -r '.data | to_entries[] | "\(.key) \(.value)"' | \
        while IFS=' ' read -r KEY VALUE; do
          echo "Processing key: $$KEY"
          # Decode the base64 value and save it to the appropriate file
          echo "Trace: Saving key: $$KEY"
          echo "$$VALUE" | base64 -d > "/etc/ssl/certs/casa-limbosolutions-com-certs/$${CERT_NAME}_$${KEY}"
        done
      done

      echo "Info: Certificates synced successfully."

services:
  kubectl:
    image: bitnami/kubectl:latest
    environment:
      KUBERNETES_SERVER: ${KUBERNETES_SERVER}
      KUBERNETES_CRT_AUTHORITY: /tmp/.kube/ca.crt
      KUBERNETES_CLIENT_CRT: /tmp/.kube/client.crt
      KUBERNETES_CLIENT_KEY: /tmp/.kube/client.key
    container_name: sync-certs-job
    entrypoint: bash -c /app/sync-certs.sh
    configs:
      - source: sync-certs-sh
        target: /app/sync-certs.sh
        mode: 0755
    volumes:
      - casa-certs:/etc/ssl/certs/casa-limbosolutions-com-certs:rw
volumes:
  casa-certs:
    name: casa-limbosolutions-com-certs
    external: true # Atention permission must be set to 1001:1001 (using chown on nginx container command)
```
@@ -1,49 +0,0 @@ (deleted file: VS Code workspace)

```jsonc
{
    "folders": [
        {
            "path": "./",
            "name": "homesrv01"
        },
        {
            "path": "../homeAssistant"
        },
        {
            "path": "../lms"
        },
        {
            "path": "../mosquitto"
        },
        {
            "name": "kb",
            "path": "../kb"
        },
        // {
        //     "name": "kb/kb",
        //     "path": "../../kb/kb"
        // }

    ],
    "settings": {
        "files.exclude": {
            "**/.git": true,
            "**/.svn": true,
            "**/.hg": true,
            "**/CVS": true,
            "**/.DS_Store": true,
            "**/Thumbs.db": true,
            "kb": true,
        },

        "ansible.python.interpreterPath": "/bin/python",
        "cSpell.words": [
            "lmscommunity",
            "localtime",
            "lyrionmusicserver",
            "rclone",
            "reverseproxy"
        ]
    }
}
```
@@ -1,40 +0,0 @@ (deleted file: rclone docker plugin playbook)

```yaml
- name: Setup rclone docker plugin
  become: true
  vars:
    # rclone_docker_plugin_config: |
    #   [s3-limbo-storage]
    #   type = s3
    #   provider = SeaweedFS
    #   access_key_id = !!! SET ON INVENTORY !!!
    #   secret_access_key = !!! SET ON INVENTORY !!!
    #   endpoint = !!! SET ON INVENTORY !!!

  hosts:
    - homesrv01
  tasks:

    - name: Enforce folders
      ansible.builtin.file:
        path: /var/lib/docker-plugins/rclone/config
        state: directory
        owner: root
        group: root
        mode: u=rwx,g=r,o-rwx
        recurse: true

    - name: Enforce folders
      ansible.builtin.file:
        path: /var/lib/docker-plugins/rclone/cache
        state: directory
        owner: root
        group: root
        mode: u=rwx,g=r,o-rwx
        recurse: true

    - name: Setup rclone s3
      ansible.builtin.copy:
        dest: /var/lib/docker-plugins/rclone/config/rclone.conf
        owner: root
        group: root
        mode: u=rwx,g-rwx,o-rwx
        content: "{{ rclone_docker_plugin_config }}"
```
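The playbook only writes the plugin's `rclone.conf`; creating a docker volume that actually uses the plugin is a separate step. A sketch, assuming the commented `[s3-limbo-storage]` remote from the vars block above and an illustrative volume name and bucket path:

```bash
# volume name and bucket path are illustrative
docker volume create music-s3 \
  -d rclone \
  -o remote=s3-limbo-storage:music \
  -o allow_other=true
```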
@@ -1,140 +0,0 @@
|
|||||||
services:
|
|
||||||
act-runner:
|
|
||||||
container_name: myLimbo-casa-gitea-act-runner
|
|
||||||
image: docker.io/gitea/act_runner:latest
|
|
||||||
restart: always
|
|
||||||
volumes:
|
|
||||||
- myLimbo-casa-gitea-act-runner-data:/data
|
|
||||||
- myLimbo-casa-gitea-act-runner-config:/config
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
|
|
||||||
environment:
|
|
||||||
- GITEA_INSTANCE_URL=${GITEA_INSTANCE_URL}
|
|
||||||
# When using Docker Secrets, it's also possible to use
|
|
||||||
# GITEA_RUNNER_REGISTRATION_TOKEN_FILE to pass the location.
|
|
||||||
# The env var takes precedence.
|
|
||||||
# Needed only for the first start.
|
|
||||||
- CONFIG_FILE= /config/config.yaml
|
|
||||||
- GITEA_RUNNER_REGISTRATION_TOKEN=${GITEA_RUNNER_REGISTRATION_TOKEN}
|
|
||||||
- GITEA_RUNNER_NAME=myLimbo-casa-gitea-act-runner
|
|
||||||
#- GITEA_RUNNER_CONFIG_FILE="/config/config.yaml"
|
|
||||||
|
|
||||||
|
|
||||||
configs:
|
|
||||||
- source: act-runner-config
|
|
||||||
target: /config/config.yaml
|
|
||||||
mode: 0444
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
myLimbo-casa-gitea-act-runner-data:
|
|
||||||
    name: myLimbo-casa-gitea-act-runner-data
  myLimbo-casa-gitea-act-runner-config:
    name: myLimbo-casa-gitea-act-runner-config

configs:
  act-runner-config:
    content: |
      # Example configuration file, it's safe to copy this as the default config file without any modification.

      # You don't have to copy this file to your instance,
      # just run `./act_runner generate-config > config.yaml` to generate a config file.

      log:
        # The level of logging, can be trace, debug, info, warn, error, fatal
        level: info

      runner:
        # Where to store the registration result.
        file: .runner
        # Execute how many tasks concurrently at the same time.
        capacity: 1
        # Extra environment variables to run jobs.
        envs:
          A_TEST_ENV_NAME_1: a_test_env_value_1
          A_TEST_ENV_NAME_2: a_test_env_value_2
        # Extra environment variables to run jobs from a file.
        # It will be ignored if it's empty or the file doesn't exist.
        env_file: .env
        # The timeout for a job to be finished.
        # Please note that the Gitea instance also has a timeout (3h by default) for the job.
        # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
        timeout: 3h
        # The timeout for the runner to wait for running jobs to finish when shutting down.
        # Any running jobs that haven't finished after this timeout will be cancelled.
        shutdown_timeout: 0s
        # Whether skip verifying the TLS certificate of the Gitea instance.
        insecure: false
        # The timeout for fetching the job from the Gitea instance.
        fetch_timeout: 5s
        # The interval for fetching the job from the Gitea instance.
        fetch_interval: 2s
        # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
        # Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
        # Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
        # If it's empty when registering, it will ask for inputting labels.
        # If it's empty when execute `daemon`, will use labels in `.runner` file.
        labels:
          - "myLimbo-casa-gitea-act-runner:docker://gitea/runner-images:ubuntu-latest"
          #- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
          #- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
          #- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"

      cache:
        # Enable cache server to use actions/cache.
        enabled: true
        # The directory to store the cache data.
        # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
        dir: ""
        # The host of the cache server.
        # It's not for the address to listen, but the address to connect from job containers.
        # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
        host: ""
        # The port of the cache server.
        # 0 means to use a random available port.
        port: 0
        # The external cache server URL. Valid only when enable is true.
        # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
        # The URL should generally end with "/".
        external_server: ""

      container:
        # Specifies the network to which the container will connect.
        # Could be host, bridge or the name of a custom network.
        # If it's empty, act_runner will create a network automatically.
        network: ""
        # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
        privileged: false
        # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
        options:
        # The parent directory of a job's working directory.
        # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
        # If the path starts with '/', the '/' will be trimmed.
        # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
        # If it's empty, /workspace will be used.
        workdir_parent:
        # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
        # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
        # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
        # valid_volumes:
        #   - data
        #   - /src/*.json
        # If you want to allow any volume, please use the following configuration:
        # valid_volumes:
        #   - '**'
        valid_volumes: []
        # overrides the docker client host with the specified one.
        # If it's empty, act_runner will find an available docker host automatically.
        # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
        # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
        docker_host: ""
        # Pull docker image(s) even if already present
        force_pull: true
        # Rebuild docker image(s) even if already present
        force_rebuild: false

      host:
        # The parent directory of a job's working directory.
        # If it's empty, $HOME/.cache/act/ will be used.
        workdir_parent:
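The embedded file mirrors the act_runner example config, with the labels pointing at the myLimbo-casa-gitea-act-runner label. As a minimal sketch, not taken from this repository, registering and starting a runner with such a config could look like the following; the instance URL and registration token variables are assumed placeholders:

    # Sketch only: generate a config, register against the Gitea instance, then run the daemon.
    ./act_runner generate-config > config.yaml
    ./act_runner register --no-interactive \
        --instance "$GITEA_INSTANCE_URL" \
        --token "$GITEA_RUNNER_REGISTRATION_TOKEN" \
        --labels "myLimbo-casa-gitea-act-runner:docker://gitea/runner-images:ubuntu-latest"
    ./act_runner daemon --config config.yaml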
@@ -1,11 +1,4 @@
-configs:
-  run-sh:
-    content: |
-      #!/bin/sh
-      # patch security so kubctl on sync-certs-job can write to the mounted volume
-      chown -R 1001:1001 /etc/ssl/certs/casa-limbosolutions-com-certs
-      while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g "daemon off;"
-
+version: '3'
 services:
   nginx:
     build:
@@ -13,29 +6,24 @@ services:
     pull_policy: build
     image: homesrv/nginx:latest
     volumes:
-      - casa-limbosolutions-com-certs:/etc/ssl/certs/casa-limbosolutions-com-certs
+      - etc_ssl:/etc/ssl
     ports:
       - 443:443
       - 80:80
     networks:
       - public
+      - private
+    command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
     restart: unless-stopped
-    command: /bin/sh -c '/run.sh'
-    configs:
-      - source: run-sh
-        target: /run.sh
-        mode: 0755
 volumes:
   nginx-conf.d:
-  casa-limbosolutions-com-certs:
-    name: casa-limbosolutions-com-certs
-    external: false
+  etc_ssl:
+    name: nginx_etc_ssl
 
 networks:
   public:
     name: reverseproxy_public
     external: true
+  private:
+    name: reverseproxy_private
+    external: true
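Both sides of this diff keep the six-hour loop that reloads nginx so renewed certificates are picked up without a restart; the change only moves it from a mounted run.sh back into the service command. As a hedged sketch (assuming docker compose v2 and the service name nginx as above), the same check and reload can be done by hand:

    # Sketch only: validate the config inside the running container, then reload it.
    docker compose exec nginx nginx -t
    docker compose exec nginx nginx -s reload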
44 services/nginx/docker/nginx.conf.d/default.conf Normal file
@@ -0,0 +1,44 @@
server {
    listen       80;
    server_name  localhost;

    #access_log  /var/log/nginx/host.access.log  main;

    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }

    #error_page  404              /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }

    # proxy the PHP scripts to Apache listening on 127.0.0.1:80
    #
    #location ~ \.php$ {
    #    proxy_pass   http://127.0.0.1;
    #}

    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    #
    #location ~ \.php$ {
    #    root           html;
    #    fastcgi_pass   127.0.0.1:9000;
    #    fastcgi_index  index.php;
    #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
    #    include        fastcgi_params;
    #}

    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny  all;
    #}
}
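This is the stock nginx default site, added so requests that match no other vhost get the welcome page and a static /50x.html error page. A quick hedged smoke test, assuming the container publishes port 80 on localhost as in the compose file above:

    # Sketch only: expect the nginx welcome page and the static error page.
    curl -i http://localhost/
    curl -i http://localhost/50x.html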
@@ -4,20 +4,8 @@ map $http_upgrade $connection_upgrade {
 }
 
 server {
-    listen 80;
-    server_name casa.limbosolutions.com *.casa.limbosolutions.com has.lan;
-    return 301 https://has.casa.limbosolutions.com$request_uri;
-}
-
-server {
-    listen 443 ssl;
-    ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
-    ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
-    ssl_protocols TLSv1.2 TLSv1.3;
-    ssl_ciphers HIGH:!aNULL:!MD5;
+    listen 80 default_server;
+    proxy_buffering off;
 
     location / {
         proxy_pass http://homeassistant-app:80;
         proxy_set_header Host $host;
@@ -27,9 +15,46 @@ server {
         proxy_set_header Connection $connection_upgrade;
     }
 
+    location ^~ /code/ {
+        proxy_pass http://code:8443/;
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header X-Forwarded-Protocol $scheme;
+        proxy_set_header X-Url-Scheme $scheme;
+
+        # WebSocket support
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+    }
+}
+
+server {
+    ssl_certificate /etc/ssl/certs/nginx-selfsigned.crt;
+    ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key;
+    listen 443 ssl default_server;
+
+    location ^~ /code/ {
+        proxy_pass http://code:8443/;
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header X-Forwarded-Protocol $scheme;
+        proxy_set_header X-Url-Scheme $scheme;
+
+        # WebSocket support
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+    }
 }
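The new 443 default_server references a self-signed certificate at /etc/ssl/certs/nginx-selfsigned.crt that this change does not create. A hedged sketch of producing one; the subject CN and the 365-day validity are assumptions, not values from the repo:

    # Sketch only: create the self-signed key/cert pair the default_server expects.
    openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
        -keyout /etc/ssl/private/nginx-selfsigned.key \
        -out /etc/ssl/certs/nginx-selfsigned.crt \
        -subj "/CN=casa.limbosolutions.com"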
@@ -1,16 +1,10 @@
 server {
-    server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com;
-    listen 443 ssl;
-    ssl_certificate /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.crt;
-    ssl_certificate_key /etc/ssl/certs/casa-limbosolutions-com-certs/casa-limbosolutions-com-tls_tls.key;
-    ssl_protocols TLSv1.2 TLSv1.3;
-    ssl_ciphers HIGH:!aNULL:!MD5;
+    listen 80;
+    proxy_buffering off;
+    server_name lms.lan;
 
     location / {
-        set $upstream lms-lms-1;
-        #docker default resolver
-        resolver 127.0.0.11 ipv6=off;
-        proxy_pass http://$upstream:9002;
+        proxy_pass http://lms-lms-1:9000/;
         proxy_set_header Host $http_host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -25,8 +19,4 @@ server {
     }
 }
 
-server {
-    listen 80;
-    server_name lms.casa.limbosolutions.com music.casa.limbosolutions.com lms.lan music.lan;
-    return 301 https://lms.casa.limbosolutions.com$request_uri;
-}
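After this change the LMS vhost answers on plain HTTP for lms.lan and proxies directly to the lms-lms-1 container on port 9000, instead of terminating TLS and resolving the upstream at request time. A hedged check from the docker host, assuming the nginx container publishes port 80 locally:

    # Sketch only: hit the vhost by name without touching DNS.
    curl -sI -H "Host: lms.lan" http://localhost/ | head -n 1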
@@ -1,15 +1,15 @@
+version: '3.8'
 services:
-  # whisper:
-  #   image: linuxserver/faster-whisper
-  #   restart: unless-stopped
-  #   volumes:
-  #     - whisper_data:/data
-  #   ports:
-  #     - 10300:10300
-  #   environment:
-  #     - TZ=Europe/Lisbon
-  #     - WHISPER_MODEL=small-int8
-  #     - WHISPER_LANG=pt
+  whisper:
+    image: rhasspy/wyoming-whisper
+    restart: unless-stopped
+    volumes:
+      - whisper_data:/data
+    ports:
+      - 10300:10300
+    environment:
+      - TZ=Europe/Lisbon
+    command: [ "--model", "medium-int8", "--language", "en" ]
   piper:
     image: rhasspy/wyoming-piper
     restart: unless-stopped
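The whisper service moves from the commented-out linuxserver/faster-whisper block to rhasspy/wyoming-whisper, with the model and language passed on the command line instead of environment variables, and it still exposes the Wyoming protocol on port 10300. A hedged smoke test, assuming the compose file is in the current directory:

    # Sketch only: start the service and confirm the Wyoming port is listening.
    docker compose up -d whisper
    nc -z localhost 10300 && echo "wyoming-whisper is listening on 10300"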
@@ -1,55 +0,0 @@
# currently hosted on chimera k3s cluster
# Add nodeSelector to schedule the pod on specific nodes
# Example: only schedule on nodes labeled with kubernetes.io/hostname=chimera
# Adjust the key/value as needed for your cluster

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: casa-services
  name: wyoming-whisper
  labels:
    app: wyoming-whisper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wyoming-whisper

  template:
    metadata:
      labels:
        app: wyoming-whisper
    spec:
      #nodeSelector:
      #  kubernetes.io/hostname: chimera-flashgordon
      tolerations:
        - key: "dedicated"
          value: "reserved"
          effect: "NoSchedule"

      containers:
        - name: wyoming-whisper
          image: linuxserver/faster-whisper
          ports:
            - containerPort: 10300
              hostPort: 10300
          env:
            - name: WHISPER_MODEL
              value: small-int8 # turbo
            - name: WHISPER_LANG
              value: pt
            - name: OMP_NUM_THREADS
              value: "4"
            - name: BEAM
              value: "4"

          # args: ["--threads", "8"]
          # resources:
          #   requests:
          #     cpu: "500m"
          #     memory: "1Gi"
          #   limits:
          #     cpu: "1"
          #     memory: "2Gi"
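This Deployment, removed in the same compare that enables the compose-managed whisper service above, was an ordinary manifest pinned to the chimera k3s cluster via tolerations. A hedged sketch of how it would have been applied and inspected; the manifest filename is an assumption:

    # Sketch only: apply the (now deleted) manifest and check the pod it creates.
    kubectl apply -f wyoming-whisper-deployment.yaml
    kubectl -n casa-services get pods -l app=wyoming-whisper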
8 tests/docker-ansible/run-ansible-container.sh Executable file
@@ -0,0 +1,8 @@
docker image pull git.limbosolutions.com/kb/ansible
docker run --rm \
    -v ${PWD}/scripts:/scripts \
    -v ${PWD}/workspace:/workspace \
    -v ${MY_ANSIBLE_PRIVATE_KEY_FILE}/id_ed25519:/data/ansible_private_key \
    git.limbosolutions.com/kb/ansible \
    bash /scripts/run.sh
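The wrapper expects MY_ANSIBLE_PRIVATE_KEY_FILE to point at a directory containing an id_ed25519 key and mounts the local scripts/ and workspace/ folders into the kb/ansible image. A hedged usage example; the key directory is an assumption:

    # Sketch only: run the test harness from tests/docker-ansible/.
    export MY_ANSIBLE_PRIVATE_KEY_FILE="$HOME/.ssh"   # assumed location of id_ed25519
    cd tests/docker-ansible
    ./run-ansible-container.sh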
11 tests/docker-ansible/scripts/run.sh Normal file
@@ -0,0 +1,11 @@
#/bin/bash


source /scripts/.env.local
echo $ANSIBLE_PRIVATE_KEY > /root/ansible_private_key
source /scripts/.env.local
cd /workspace
chmod 600 /root/ansible_private_key
cat /root/ansible_private_key
export ANSIBLE_HOST_KEY_CHECKING=False
$ANSIBLE_COMMAND
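run.sh sources /scripts/.env.local for its key material and for the final command it executes; that file is not part of this change. A hedged illustration of what it might contain: the variable names ANSIBLE_PRIVATE_KEY and ANSIBLE_COMMAND come from the script, the values below are assumptions only:

    # Sketch only: example scripts/.env.local consumed by run.sh (values are assumed).
    ANSIBLE_PRIVATE_KEY="$(cat "$HOME/.ssh/id_ed25519")"
    ANSIBLE_COMMAND="ansible-playbook -i inventory.ini site.yml -u root --private-key /root/ansible_private_key"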
9 tests/docker-ansible/workspace/site.yml Normal file
@@ -0,0 +1,9 @@
- name: Homesrv01 playbook
  hosts: all
  tasks:
    - name: Ping my hosts
      ansible.builtin.ping:

    - name: Print message
      ansible.builtin.debug:
        msg: Hello world
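The playbook targets hosts: all, so it can also be exercised directly, outside the container wrapper. A hedged example using Ansible's inline one-host inventory (the trailing comma); the hostname, user, and key path are assumptions:

    # Sketch only: run the same playbook without the docker wrapper.
    ansible-playbook -i "homesrv01.lan," site.yml -u root --private-key "$HOME/.ssh/id_ed25519"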