Compare commits: 9e0c0505b3...devel (128 commits)

Commit SHAs in this comparison (author, date, and message columns omitted):

4a516eee15, 15a1411f1a, 868ab721dd, 9776e9a316, 28f1f80d6f, 0f9479731a,
3df0115191, fcb25b79ce, e591db8581, e4fd90c013, 219b03b4ee, 1b941a11a2,
12991700b9, 02b6460cff, 5f602c797f, 538bb26f83, fa0df823ee, 49eacf103c,
4d1d28c64b, 5803ea337e, 20e9ec68d2, d901c1d940, 875d8f1538, 1e1677cb4d,
29bccbac02, 80015c6535, 3bcbee1b85, 8f965c3e2b, 88247b4011, 740b73cb7d,
857e83a6fe, 745f6acc04, 43fbb3993b, 955d7e8a64, 614fca41c0, 0163d5ab18,
eb2ad9e60a, 11235ab859, ce72850721, d5f92811bd, 2d26caba54, 36ce40d718,
a7d9e1b270, a6d1d46236, d9c00a0d9e, f3008294e4, 58dcf4694f, a822fe0915,
cd1910c2bd, 421ceabd9e, 068a33626d, 8b4fb71160, 72d8e7cdde, 58128eec46,
48e7b8208e, 1c417eda10, 14ce2dfea6, e9974a054e, f61baa3f04, 46e1366c4f,
924341a276, b36bbec72a, 7bb00a3586, 9cd0cfcb4f, 80c3565fa1, fe0fc835cd,
ed2fd510a5, b3e2d1c887, 6afb84b443, 5ead10afb9, 4e1d50504d, 50e161f8dc,
3001e19c7e, 85877f8431, 425761f0f5, 1e0eb9b426, f791b43c86, 12ceb3558b,
eb1ff31e30, d611301f8a, 03574c1560, ea2f797b30, 687e189b18, 37b22c7ef5,
cf22d27c57, b4feffc118, 0c95df3066, be9c658589, eb569c05c7, f178a7bf78,
20450332d4, 776e35f1a3, 4275a0bc23, 674d432773, 05b475c464, a0be654b92,
8c69b7af95, 28af9314ef, 450d8fcb7a, 01c0e21f94, c11e492f8f, e298d5afa2,
bcbdd75185, 8ac7e0f5a3, 197157b830, 4069d8a77a, fe0cd3ab67, 2cff4e4354,
b4f9fba952, 8f805c3b15, 742ef24a77, f66a1fb8cc, d24a9b2713, 487e41c058,
ce799cceaa, 12eabe1351, 4a21c792e1, 9dd76a9161, ec6106c73e, f39804e621,
ea6ae01f76, 202de6d2b4, cf0380aee4, 4563957e80, 7546c88ee4, 96ea66b77a,
732cf53192, 6819e6b4cb
9  .ansible-lint.yaml  (new file)
@@ -0,0 +1,9 @@
---
skip_list:
  - line-length # don't yell about line length
  - meta-no-info # we don't publish to galaxy so stop yelling about it
  - package-latest # we install lots of latest stuff still 😢
  - experimental # no instability plz, give us a call when ur stable

warn_list:
  - no-handler # good to keep, but shouldn't be fatal
4  .gitignore  (vendored)
@@ -5,3 +5,7 @@ playbooks/testing.yml
 *.idea
 **/__pycache__/
 .venv/
+.ansible/
+.tox/
+.terraform/
+.terraform.lock.*
@@ -32,3 +32,11 @@ repos:
           - "--wrap=90"
         types:
           - markdown
+
+      - id: terraform
+        name: terraform format
+        entry: terraform
+        language: system
+        args:
+          - fmt
+        files: ".*\\.tf$"
7  .yamllintrc.yaml  (new file)
@@ -0,0 +1,7 @@
---
yaml-files:
  - "*.yml"
  - "*.yaml"

rules:
  line-length: disable
9  Makefile  (new file)
@@ -0,0 +1,9 @@
clean:
	rm --recursive --force .ansible/
	rm --recursive --force .tox/

dev:
	@poetry install --sync
	@poetry run pre-commit install
	@poetry run ansible-galaxy collection install --requirements-file ./requirements.yaml --collections-path ./.ansible
	@bash ./link-local-collections.sh
28  README.md
@@ -2,6 +2,28 @@
 
 Ansible configs for the Skylab Homelab
 
-Main entrypoint is through the `ansible` script in this repository. The script sets up
-basic environment variables to avoid conflicts with other environments and sets the
-inventory.
+## Local workstation setup:
+
+```bash
+make dev
+poetry run ansible-playbook ...
+```
+
+## Bootstrapping remote system for management:
+
+1. Install a supported operating system: [Rocky Linux](https://rockylinux.org),
+   [Fedora](https://getfedora.org)
+2. During installation create a user named `ansible` with any password
+3. After installation copy SSH key to the `ansible` user
+4. Enable password-less sudo access for the `ansible` user with this command:
+
+   ```bash
+   sudo tee /etc/sudoers.d/30-ansible <<<"ansible ALL=(ALL) NOPASSWD:ALL"
+   ```
+
+5. Change the UID/GID of the `ansible` user/group to `1400` with these commands:
+
+   ```bash
+   sudo usermod -u 1400 ansible
+   sudo groupmod -g 1400 ansible
+   ```
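For reference, a typical end-to-end invocation after the steps above would look
something like this (a sketch only — the playbook name comes from the `skylab.core`
collection added in this change set, and the host is one from the new inventory):

```bash
# One-time workstation setup: Poetry env, pre-commit hooks, galaxy
# collections, and symlinks for the local skylab.* collections
make dev

# Run a collection playbook through the Poetry-managed environment,
# limited to a single inventory host
poetry run ansible-playbook skylab.core.configure --limit iridium
```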
7  ansible  (deleted)
@@ -1,7 +0,0 @@
#!/usr/bin/env bash

ANSIBLE_LIBRARY='' \
ANSIBLE_FILTER_PLUGINS='' \
ANSIBLE_CONFIG='' \
ANSIBLE_INVENTORY=$(pwd)/inventory.yaml \
"ansible-$1" ${@:2}
@@ -1,8 +1,10 @@
 [defaults]
-host_key_checking = false
+host_key_checking = true
+collections_path = .ansible
+inventory = inventory/
 
 [ssh_connection]
-ssh_args = "-o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"
+ssh_args = "-o ControlMaster=auto -o ControlPersist=60s"
+
+[inventory]
+enable_plugins = ansible.builtin.yaml
@@ -1,95 +0,0 @@
---
all:
  vars:
    skylab_state_dir: /var/run/skylab
    skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"
    skylab_pip_version: 19.3.1
    ansible_user: ansible
    ansible_ssh_common_args: "-o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"

en1:
  vars:
    skylab_location: Newton MA
    # gross hack for now, will be refactored later
    _skylab_adguard_nat_rule: 8

  hosts:
    core:
      ansible_host: 10.42.101.1
      ansible_port: 4242
      ansible_network_os: edgeos
      skylab_description: EN1 Core Router

    iridium:
      ansible_host: 10.42.101.200
      skylab_description: Local Monitor Node
      skylab_targets: [network]

  children:

    cluster:
      hosts:
        pegasus: # jupiter
          ansible_host: 10.42.101.100
          skylab_description: Arbiter Node
          skylab_targets: [cluster, datastore]
          skylab_cluster:
            address: 10.42.101.10/24
            interface: bond0

        saturn: # remus
          ansible_host: 10.42.101.110
          skylab_description: Operation Node
          skylab_cluster:
            address: 10.42.101.110/24
            interface: bond0
          skylab_networking:
            hostname: saturn.skylab.enp.one
            dns: [10.42.101.1]
            gateway: 10.42.101.1/24
            interfaces:
              bond0:
                type: bond
                members: [eno1, eno2]
                addresses:
                  - 10.42.101.11/24
                  - 10.42.101.110/24
                dhcp: false
              bond0.99:
                type: vlan
                address: 192.168.42.20/24
                dhcp: false

        orion: # romulus
          ansible_host: 10.42.101.120
          skylab_description: Operation Node
          skylab_targets: [cluster, datastore]
          skylab_cluster:
            address: 10.42.101.12/24
            interface: bond0
          skylab_networking:
            hostname: orion.skylab.enp.one
            dns: [10.42.101.1]
            gateway: 10.42.101.1/24
            interfaces:
              bond0:
                type: bond
                members: [eno1, eno2]
                addresses:
                  - 10.42.101.12/24
                  - 10.42.101.120/24
                dhcp: false
              bond0.99:
                type: vlan
                address: 192.168.42.30/24
                dhcp: false

en2:
  vars:
    skylab_location: DigitalOcean TOR1

  hosts:
    hubble:
      ansible_host: en2a.enp.one
      skylab_description: Cloud Web Server
      skylab_targets: [cloud]
166  inventory/en1.old.yaml  (new file)
@@ -0,0 +1,166 @@
---
workstation:
  hosts:
    voyager:
      skylab_description: Personal Workstation
      skylab_hostname: voyager.skylab.enp.one
      skylab_targets: [workstation]

en1:
  vars:
    skylab_location: Newton MA
    skylab_dashboard: info.en1.local
    # gross hack for now, will be refactored later
    _skylab_adguard_nat_rule: 9

  hosts:
    core:
      ansible_host: 10.42.101.1
      ansible_port: 4242
      ansible_network_os: edgeos
      skylab_description: EN1 Core Router

    iridium:
      ansible_host: 10.42.101.200
      skylab_description: Local Monitor Node
      skylab_hostname: iridium.skylab.enp.one
      skylab_targets: [network]
      skylab_networking:
        enp4s0:
          firewall: internal
          dhcp: false
          gateway: 10.42.101.1/24
          dns:
            - 10.42.101.1
          addresses:
            - 10.42.101.200/24

  children:

    cluster:
      vars:
        skylab_targets: [cluster, datastore]
        skylab_compose_version: 3.8
        skylab_compose_dir: "{{ skylab_state_dir }}/compose"

      hosts:
        pegasus: # jupiter
          ansible_host: 10.42.101.100
          skylab_hostname: pegasus.skylab.enp.one
          skylab_legacy_names:
            - jupiter.net.enp.one
            - jupiter.svr.local
          skylab_description: Arbiter Node
          skylab_cluster:
            address:
              access: 10.42.101.10/24
              internal: 192.168.42.10/24
            interface:
              access: bond0
              internal: bond0.99
          skylab_datastore_device: sdb
          skylab_networking:
            eno1:
              bond: bond0
            eno2:
              bond: bond0
            bond0:
              device: bond
              firewall: internal
              gateway: 10.42.101.1/24
              dns:
                - 10.42.101.1
              addresses:
                - 10.42.101.100/24
                - 192.168.255.255/32
              dhcp: false
            bond0.99:
              device: vlan
              firewall: trusted
              addresses:
                - 192.168.42.10/24
              dhcp: false

        saturn: # remus
          ansible_host: 10.42.101.110
          skylab_hostname: saturn.skylab.enp.one
          skylab_legacy_names:
            - remus.net.enp.one
            - remus.svr.local
          skylab_description: Operational Node
          skylab_cluster:
            address:
              access: 10.42.101.11/24
              internal: 192.168.42.20/24
            interface:
              access: bond0
              internal: bond0.99
          skylab_networking:
            eno1:
              bond: bond0
            eno2:
              bond: bond0
            bond0:
              device: bond
              firewall: internal
              dhcp: false
              gateway: 10.42.101.1/24
              addresses:
                - 10.42.101.110/24
                - 192.168.255.255/32
              dns:
                - 10.42.101.1
            bond0.99:
              device: vlan
              firewall: trusted
              dhcp: false
              addresses:
                - 192.168.42.20/24

        orion: # romulus
          ansible_host: 10.42.101.120
          skylab_hostname: orion.skylab.enp.one
          skylab_legacy_names:
            - romulus.net.enp.one
            - romulus.svr.local
          skylab_description: Operational Node
          skylab_cluster:
            address:
              access: 10.42.101.12/24
              internal: 192.168.42.30/24
            interface:
              access: bond0
              internal: bond0.99
          skylab_datastore_device: sdb
          skylab_networking:
            eno1:
              bond: bond0
            eno2:
              bond: bond0
            bond0:
              device: bond
              firewall: internal
              gateway: 10.42.101.1/24
              dns:
                - 10.42.101.1
              addresses:
                - 10.42.101.120/24
                - 192.168.255.255/32
              dhcp: false
            bond0.99:
              device: vlan
              firewall: trusted
              addresses:
                - 192.168.42.30/24
              dhcp: false

en2:
  vars:
    skylab_location: DigitalOcean TOR1

  hosts:
    hubble:
      ansible_host: en2a.enp.one
      skylab_hostname: hubble.en2.enp.one
      skylab_description: Cloud Web Server
      skylab_targets: [cloud]
51  inventory/en1.yaml  (new file)
@@ -0,0 +1,51 @@
---
en1:

  vars:
    skylab_location: Cambridge

  children:
    domain:
      children:

        cluster:
          hosts:
            canaveral:
              ansible_host: 10.42.101.10
              skylab_description: Compute and Storage Node
            baikonur:
              ansible_host: 10.42.101.11
              skylab_description: Compute and Storage Node
            vandenberg:
              ansible_host: 10.42.101.12
              skylab_description: Compute and Storage Node
            andoya:
              ansible_host: 10.42.101.13
              skylab_description: Auxiliary Compute Node
            jiuquan:
              ansible_host: 10.42.101.14
              skylab_description: Auxiliary Compute Node

        datastore:
          hosts:
            canaveral:
              skylab_datastore_block: /dev/sda
            baikonur:
              skylab_datastore_block: /dev/sda
            vandenberg:
              skylab_datastore_block: /dev/sda

      hosts:
        3d-printer: {}
        mediastore: {}
        backstore: {}

    local:
      hosts:
        core: {}
        switch-1: {}
        switch-2: {}
        wap-1: {}
        wap-2: {}
        wap-3: {}
        printer: {}
39  inventory/group_vars/all.yaml  (new file)
@@ -0,0 +1,39 @@
---
ansible_user: ansible

ansible_port: 4242

skylab_state_dir: /var/lib/skylab

skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"

skylab_ansible_vault_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61323762623165383963316238343539346336663864366631616339356564346636373561616237
  6666363531393234636337656431366365343236346536320a346163353935366636303131313661
  32623635363063383039363539303135393838376264356463646465376435616363376163373663
  6366633665373939380a373234633365376632376433643034336539346338613566353537663731
  34323464633165626133306464363464333539363761343831316565356266373833

skylab_tfstate_backend:
  hostname: cluster.lab.enp.one
  username: terraform
  schema: terraform
  port: 32421
  password: !vault |
    $ANSIBLE_VAULT;1.1;AES256
    30313365393065316563323363663135313438616461356439366632303636343735653033363930
    6334613931376566363064663539643639326363663933610a306138616362376435386466306538
    30626330613932363339363438356430613461313335333536623931343436353330393433373630
    3631343463616631380a386661336534663033383637666538316665303962353034376232356235
    65323339353563623431666535366465353133343137653232326534326436323661636536373564
    3466633762303966366366653531613261336561356531636461

skylab_mgmt:
  sshport: 4242
  group: skylab
  user: ansible
  id: 1400
  sshkeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP5TGKururOa1Y+cbv8AWXYI5zhfZCDV0fsBG+33IYUc enpaul@ansible.voyager
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBf7i/8hSJDYnoD95noCJJVtSxxCp9N5EmnshALufiwm enpaul@ansible.opportunity
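The `!vault` values above are standard Ansible vault-encrypted strings. A minimal
sketch of how such a value is produced (the secret shown is a placeholder):

```bash
# Prompts for the vault password, then prints a "variable: !vault |" YAML
# snippet that can be pasted into inventory/group_vars/all.yaml
ansible-vault encrypt_string 'example-secret' --name 'skylab_ansible_vault_password'
```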
28  link-local-collections.sh  (new executable file)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

PWD=$(pwd)
ANSIBLE_NAMESPACE="skylab"
ANSIBLE_COLLECTION_DIR="$PWD/.ansible/ansible_collections"

mkdir --parents "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE"

for collection_path in "$PWD"/"$ANSIBLE_NAMESPACE"/*; do
  collection=$(basename "$collection_path")
  if [[ ! -L "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection" ]]; then
    echo "Linking $ANSIBLE_NAMESPACE.$collection into $ANSIBLE_COLLECTION_DIR"
    rm --recursive --force "${ANSIBLE_COLLECTION_DIR:?}/$ANSIBLE_NAMESPACE/$collection"
    ln --symbolic "$PWD/$ANSIBLE_NAMESPACE/$collection" "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection"
  fi
done

echo "Finished linking local collections"

LOCAL_COLLECTION_PATH=$(dirname "$ANSIBLE_COLLECTION_DIR")

if [ -z ${ANSIBLE_COLLECTIONS_PATH+x} ]; then
  echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH is not set, collections will not be callable"
  echo "   HINT: export ANSIBLE_COLLECTIONS_PATH=$LOCAL_COLLECTION_PATH"
elif [[ ${ANSIBLE_COLLECTIONS_PATH} != *"$LOCAL_COLLECTION_PATH"* ]]; then
  echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH does not include local collection directory"
  echo "   HINT: export ANSIBLE_COLLECTIONS_PATH=\$ANSIBLE_COLLECTIONS_PATH:$LOCAL_COLLECTION_PATH"
fi
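A short usage sketch of how this script fits into the workflow (the export simply
mirrors the script's own HINT output):

```bash
# Symlink the repo's skylab.* collections into .ansible/ansible_collections
bash ./link-local-collections.sh

# Make the linked collections resolvable by ansible-playbook
export ANSIBLE_COLLECTIONS_PATH="$(pwd)/.ansible"
```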
@@ -1 +0,0 @@
../resources
@@ -1,177 +0,0 @@
---
- name: Group hosts by platform
  hosts: all
  tags:
    - always
  pre_tasks:
    - include_tasks: tasks/meta/runtime-group-determination.yaml


- name: Bootstrap remote ansible environment
  hosts: linux
  tags:
    - always
  tasks:
    - name: Install CentOS 8 python bindings
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.dnf:
        state: present
        name:
          - python3-libselinux
          - python3-policycoreutils
          - python3-firewall

    - name: Create state directory
      become: true
      ansible.builtin.file:
        path: "{{ skylab_state_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0755

    - name: Create bootstrap virtualenv
      ansible.builtin.command:
        cmd: "{{ ansible_python_interpeter | default(discovered_interpreter_python) }} -m venv {{ skylab_ansible_venv }} --system-site-packages"
        creates: "{{ skylab_ansible_venv }}/bin/python"

    - name: Pin bootstrap virtualenv pip
      ansible.builtin.pip:
        executable: "{{ skylab_ansible_venv }}/bin/pip"
        name: pip
        state: present
        version: "{{ skylab_pip_version }}"

    - name: Copy requirements file to remote
      ansible.builtin.copy:
        src: remote-requirements.txt
        dest: "{{ skylab_ansible_venv }}/requirements.txt"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0644

    - name: Install remote requirements
      ansible.builtin.pip:
        executable: "{{ skylab_ansible_venv }}/bin/pip"
        requirements: "{{ skylab_ansible_venv }}/requirements.txt"
        state: present


- name: Configure common server settings
  hosts: linux
  vars_files:
    - vars/packages.yaml
  tasks:
    - name: Set hostname
      become: true
      ansible.builtin.hostname:
        name: "{{ inventory_hostname }}"
        use: systemd

    - name: Install EPEL repository config
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.yum_repository:
        name: epel
        description: Extra Packages for Enterprise Linux
        baseurl: https://download.fedoraproject.org/pub/epel/$releasever{{ '/Everything' if ansible_distribution_major_version == '8' else '' }}/$basearch/

    - name: Install EPEL GPG key
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.rpm_key:
        state: present
        key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}

    - name: Disable sudo password for WHEEL group
      when: ansible_distribution == "Rocky" or ansible_distribution == "CentOS"
      become: true
      ansible.builtin.copy:
        src: wheel-group-no-sudo-password
        dest: /etc/sudoers.d/30-wheel
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644

    - name: Install global bash config
      become: true
      ansible.builtin.copy:
        src: global.sh
        dest: /etc/profile.d/ZZ-skylab-global.sh
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644

    - name: Install universal packages
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.dnf:
        name: "{{ skylab_packages_global + skylab_packages_rocky }}"
        state: present


- name: Configure SSH
  hosts: linux
  handlers:
    - name: restart-sshd
      become: true
      ansible.builtin.systemd:
        name: sshd
        state: restarted
  tasks:
    - name: Disable root auth
      become: true
      ansible.builtin.replace:
        path: /etc/ssh/sshd_config
        regexp: "^.*PermitRootLogin (yes|no).*$"
        replace: "PermitRootLogin no"
      notify: [restart-sshd]

    - name: Disable password auth
      become: true
      ansible.builtin.replace:
        path: /etc/ssh/sshd_config
        regexp: "^.*PasswordAuthentication (yes|no).*$"
        replace: "PasswordAuthentication no"
      notify: [restart-sshd]

    - name: Disable challenge response auth
      become: true
      ansible.builtin.replace:
        path: /etc/ssh/sshd_config
        regexp: "^.*ChallengeResponseAuthentication (yes|no).*$"
        replace: "ChallengeResponseAuthentication no"
      notify: [restart-sshd]

    - name: Disable GSSAPI auth
      become: true
      ansible.builtin.replace:
        path: /etc/ssh/sshd_config
        regexp: "^.*GSSAPIAuthentication (yes|no).*$"
        replace: "GSSAPIAuthentication no"
      notify: [restart-sshd]

    - name: Disable dynamic MOTD on debian systems
      when: ansible_os_family == "Debian"
      ansible.builtin.replace:
        path: /etc/pam.d/sshd
        regexp: "^session optional pam_motd.so motd=/run/motd.dynamic"
        replace: "#session optional pam_motd.so motd=/run/motd.dynamic"

    - name: Disable Cockpit activation message on Rocky
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.file:
        path: /etc/motd.d/cockpit
        state: absent

    - name: Copy MOTD to remote
      become: true
      ansible.builtin.template:
        src: motd.j2
        dest: /etc/motd
        mode: 0644


- import_playbook: update.yaml
@@ -1 +0,0 @@
../tasks
@@ -1 +0,0 @@
../resources
@@ -1 +0,0 @@
../vars/
2628  poetry.lock  (generated)
File diff suppressed because it is too large
@@ -6,22 +6,22 @@ authors = ["Ethan Paul <me@enp.one>"]
 license = "MIT"
 
 [tool.poetry.dependencies]
-python = "^3.8"
-ansible = "^3.4.0"
-docker = "^4.2.0"
-docker-compose = "^1.25.4"
+python = "^3.10"
+ansible-core = "^2.14.3"
+docker = "^6.0.1"
 paramiko = "^2.7.1"
-jsondiff = "^1.2.0"
+jsondiff = "^2.0.0"
 netaddr = "^0.8.0"
 
 [tool.poetry.dev-dependencies]
-ansible-lint = "^4.2.0"
-pre-commit = "^2.9.2"
-pre-commit-hooks = "^3.3.0"
-safety = "^1.9.0"
+ansible-lint = {version = "^6.14.0", markers = "platform_system != 'Windows'"}
+ipython = "^8.11.0"
+mdformat = "^0.7.16"
+mdformat-gfm = "^0.3.5"
+poetry = "^1.3.0"
+pre-commit = "^3.2.0"
+pre-commit-hooks = "^4.4.0"
+safety = "^2.3.5"
 tox = "^3.20.1"
-tox-poetry-installer = "^0.8.1"
-yamllint = "^1.20.0"
-mdformat = "^0.7.9"
-mdformat-gfm = "^0.3.3"
-ipython = "^7.28.0"
+tox-poetry-installer = {extras = ["poetry"], version = "^0.10.0"}
+yamllint = "^1.29.0"
4  requirements.yaml  (new file)
@@ -0,0 +1,4 @@
---
collections:
  - source: ./skylab/
    type: subdirs
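This is the requirements file consumed by the Makefile's `dev` target; run by hand,
the equivalent install is:

```bash
# Install the local ./skylab/ collection sources into the repo-local path
ansible-galaxy collection install --requirements-file ./requirements.yaml --collections-path ./.ansible
```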
@@ -1,11 +0,0 @@

//////// /// /// /// /// /// /////// //////
/// /// /// /// /// /// /// /// /// ///
/// //////// /////// /// ///////// ///////
/////// /// /// /// /// /// /// /// ///
/// /// /// ///// /// /// /// ///////
/// ******************* /// ********************
////// /////////

> {{ skylab_description }} @{{ skylab_location }}
{{ '' }}
@@ -1 +0,0 @@
%wheel ALL=(ALL) NOPASSWD: ALL
3  skylab/core/README.md  (new file)
@@ -0,0 +1,3 @@
# Ansible Collection - skylab.core

Documentation for the collection.
26  skylab/core/galaxy.yml  (new file)
@@ -0,0 +1,26 @@
---
namespace: skylab
name: core
version: 0.0.0
description: Network deployment procedures and configuration state management
authors:
  - Ethan Paul <me@enp.one>
license:
  - MIT
readme: README.md
tags: []
repository: https://vcs.enp.one/skylab/skylab-ansible/
build_ignore: []

# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
  ansible.netcommon: ">=2.5.0,<3.0.0"
  ansible.posix: ">=1.3.0,<2.0.0"
  ansible.utils: ">=2.4.3,<3.0.0"
  community.docker: ">=2.0.2,<3.0.0"
  community.network: ">=3.0.0,<4.0.0"
  community.general: ">=4.1.0,<5.0.0"
  community.crypto: ">=1.0.0,<2.0.0"
2  skylab/core/meta/runtime.yml  (new file)
@@ -0,0 +1,2 @@
---
requires_ansible: ">=2.11,<2.15"
47  skylab/core/playbooks/configure.yaml  (new file)
@@ -0,0 +1,47 @@
---
- name: Group hosts by platform
  hosts: all
  tags:
    - always
  pre_tasks:
    - include_tasks: tasks/meta/runtime-group-determination.yaml


- name: Bootstrap remote ansible environment
  hosts: linux
  gather_facts: false
  tags:
    - always
  tasks:
    - include_tasks: tasks/meta/bootstrap-remote-env.yaml


# [lemony snicket voice] "server" here being a word used to mean "not a workstation"
- name: Configure servers
  hosts: linux:!workstation
  gather_facts: false
  roles:
    - role: skylab.core.server


- name: Configure cluster
  hosts: linux:&cluster
  gather_facts: false
  roles:
    - role: skylab.core.datastore
    - role: skylab.core.swarm


- name: Configure dashboard nodes
  hosts: iridium
  gather_facts: false
  roles:
    - role: skylab.core.dashboard
      dashboard_hostname: "{{ skylab_dashboard }}"


- name: Configure workstations
  hosts: workstation
  gather_facts: false
  roles:
    - role: skylab.core.workstation
200  skylab/core/playbooks/deploy.yaml  (new file)
@@ -0,0 +1,200 @@
---
- name: Bootstrap remote ansible environment
  hosts: linux
  tags:
    - always
  tasks:
    - include_tasks: tasks/meta/bootstrap-remote-env.yaml


- name: Clean up old orchestration data
  hosts: cluster
  gather_facts: false
  tags:
    - cleanup
  vars_files:
    - vars/services.yaml
    - vars/access.yaml
  tasks:
    - name: Create compose storage directory
      become: true
      ansible.builtin.file:
        path: "{{ skylab_compose_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ skylab_group_admin.name }}"
        mode: 0770

    - name: Fetch existing compose files
      ansible.builtin.command:
        cmd: /usr/bin/ls {{ skylab_compose_dir }}
      changed_when: false
      register: _compose_contents_raw

    - name: Remove legacy compose files
      when: item.replace('.yaml', '') not in skylab_services
      ansible.builtin.file:
        path: "{{ skylab_compose_dir }}/{{ item }}"
        state: absent
      loop: "{{ _compose_contents_raw.stdout_lines }}"

    - name: Fetch existing stacks
      vars:
        ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
      community.docker.docker_stack_info: {}
      register: _stack_info

    - name: Remove legacy stacks
      vars:
        ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
      when: item.Orchestrator == 'Swarm' and item.Name not in skylab_services
      community.docker.docker_stack:
        name: "{{ item.Name }}"
        state: absent
      loop: "{{ _stack_info.results }}"
      loop_control:
        label: "{{ item.Name }}"

    - name: Fetch existing Nginx configs
      ansible.builtin.command:
        cmd: ls {{ local_datastore_mount }}/appdata/nginx/conf.d/
      changed_when: false
      register: _nginx_configs

    - name: Remove legacy nginx configs
      when: item.replace('.conf', '') not in skylab_services
      ansible.builtin.file:
        path: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item }}.conf"
        state: absent
      loop: "{{ _nginx_configs.stdout_lines }}"


- name: Deploy stack service{{ (' ' + service) if service is defined else 's' }}
  hosts: cluster
  gather_facts: false
  vars:
    local_datastore_mount: /mnt/datastore
  vars_files:
    - vars/access.yaml
    - vars/services.yaml
  tasks:
    - name: Validate user input
      when: service is defined
      ansible.builtin.assert:
        that:
          - service in skylab_services

    - name: Determine service stacks to deploy
      ansible.builtin.set_fact:
        _services: "{{ {service: skylab_services[service]} if service is defined else skylab_services }}"

    - name: Determine app account mapping
      vars:
        _service_accounts: {}
      when: item.service | default(false)
      ansible.builtin.set_fact:
        _service_accounts: "{{ _service_accounts | combine({item.name: item}) }}"
      loop: "{{ skylab_accounts }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Create compose directory
      become: true
      ansible.builtin.file:
        path: "{{ skylab_compose_dir }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ skylab_group_admin.name }}"
        mode: 0770

    - name: Install compose file
      vars:
        app: "{{ item.value }}"
        _app_account: "{{ _service_accounts[item.value.user] if item.value.user is defined else false }}"
      ansible.builtin.template:
        src: docker-compose/{{ item.key }}.yaml.j2
        dest: "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
        owner: "{{ ansible_user }}"
        group: "{{ skylab_group_admin.name }}"
        mode: 0660
      loop: "{{ _services | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Create automation groups
      become: true
      when: item.value.user is defined
      ansible.builtin.group:
        name: "{{ item.value.user }}"
        gid: "{{ _service_accounts[item.value.user].uid }}"
        state: present
      loop: "{{ _services | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Create automation accounts
      become: true
      when: item.value.user is defined
      ansible.builtin.user:
        name: "{{ item.value.user }}"
        state: present
        uid: "{{ _service_accounts[item.value.user].uid }}"
        group: "{{ item.value.user }}"
        groups: "{{ [skylab_group_automation.name, skylab_group.name] }}"
        system: true
        generate_ssh_key: false
        password: "{{ _service_accounts[item.value.user].password }}"
      loop: "{{ _services | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Configure datastore directories
      run_once: true
      block:
        - name: Determine volume directories
          vars:
            _stack_volume_directories: []
          when: item.value.volumes is defined
          ansible.builtin.set_fact:
            _stack_volume_directories: "{{ _stack_volume_directories + [{'user': (item.value.user | default(ansible_user)), 'volumes': (item.value.volumes.values() | list)}] }}"
          loop: "{{ _services | dict2items }}"
          loop_control:
            label: "{{ item.key }}"

        - name: Create service directories
          become: true
          ansible.builtin.file:
            path: "{{ local_datastore_mount }}{{ item.1 }}"
            state: directory
            owner: "{{ item.0.user }}"
            group: "{{ skylab_group_admin.name }}"
            mode: 0770
          loop: "{{ _stack_volume_directories | subelements('volumes') }}"

    - name: Deploy stack
      vars:
        ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
      community.docker.docker_stack:
        name: "{{ item.key }}"
        compose:
          - "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
        prune: false
        state: present
      loop: "{{ _services | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Configure reverse proxy
      run_once: true
      block:
        - name: Create nginx config
          when: item.value.domain is defined
          ansible.builtin.template:
            src: stack-nginx.conf.j2
            dest: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item.key }}.conf"
            owner: "{{ ansible_user }}"
            group: "{{ skylab_group_admin.name }}"
            mode: 0464
          loop: "{{ _services | dict2items }}"
          loop_control:
            label: "{{ item.value.domain | default(item.key) }}"
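The templated play name and the `Validate user input` task imply the playbook deploys
either every stack or a single one selected with the `service` extra var; a usage
sketch (the stack name is illustrative, taken from the compose templates added below):

```bash
# Deploy every stack defined in vars/services.yaml
poetry run ansible-playbook skylab.core.deploy

# Deploy a single stack by passing the `service` extra var
poetry run ansible-playbook skylab.core.deploy --extra-vars service=gitea
```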
@@ -2,11 +2,12 @@ function _parse_git_branch() {
     git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
 }
 
-export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
+export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[37m\]:\w\[\e[33m\]\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\[\e[0;97m\]$\[\e[0m\] "
 export rc=/home/$USERNAME/.bashrc
 export VIRTUALENV_DIR=/home/$USERNAME/.venvs
 export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt
 
-random() {
+function random() {
     if [[ $# -eq 0 ]]; then
         num=32
     else
@@ -19,9 +20,10 @@ function up() { cd $(eval printf '../'%.0s {1..$1}); }
 
 function pipin() { pip freeze | grep $1; }
 
 function continuous () { while true; do ${@}; sleep 3; done; }
 
+alias bk='cd -'
 alias fuck='sudo $(history -p \!\!)'
 alias ls='ls -lshF --color --group-directories-first --time-style=long-iso'
 alias version='uname -orp && lsb_release -a | grep Description'
 alias activate='source ./bin/activate'
 alias cls='clear'
@@ -32,3 +34,4 @@ alias whatismyip='curl https://icanhazip.com/'
 alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
 alias epoch="python3 -c 'import time; print(time.time());'"
 alias uptime="command uptime --pretty"
+alias unmount="umount"
137  skylab/core/playbooks/files/pingtest.bash  (new executable file)
@@ -0,0 +1,137 @@
#!/usr/bin/env bash

set -o pipefail

declare FMT_RESET
declare FMT_BOLD
declare FMT_GREEN
declare FMT_RED
declare NL
FMT_RESET=$(printf "\\e[0m")
FMT_BOLD=$(printf "\\e[1m")
FMT_GREEN=$(printf "\\e[32m")
FMT_RED=$(printf "\\e[31m")
NL=$'\n'
readonly FMT_RESET
readonly FMT_BOLD
readonly FMT_GREEN
readonly FMT_RED
readonly NL


function usage() {
  cat << __EOF__
${FMT_GREEN}$(basename "$0")${FMT_RESET}: \

Ping hosts and print status

${FMT_BOLD}Usage:${FMT_RESET}
  $(basename "$0") [-h] [--service|--network]

  ${FMT_GREEN}-h --help${FMT_RESET}
    Print this message and exit.

  ${FMT_GREEN}--services${FMT_RESET}
    Report service status

  ${FMT_GREEN}--network${FMT_RESET}
    Report network status

__EOF__
}

function _fmt_online() { echo "${FMT_BOLD}${FMT_GREEN}ONLINE${FMT_RESET}"; }

function _fmt_offline() { echo "${FMT_BOLD}${FMT_RED}OFFLINE${FMT_RESET}"; }

function _test_cmd() { if eval "$1" &>/dev/null ; then echo "${2}~$(_fmt_online)"; else echo "${2}~$(_fmt_offline)"; fi }

function _test_ping() { _test_cmd "ping -W 2 -c 1 ${1}" "${2}"; }

function _test_curl_head() { _test_cmd "curl --fail --head ${1}" "${2}"; }

function _test_curl_get() { _test_cmd "curl --fail --get ${1}" "${2}"; }

function _test_curl_insecure() { _test_cmd "curl --fail --head --insecure ${1}" "${2}"; }

function _test_netcat() { _test_cmd "nc -z ${1} ${2}" "${3}"; }

function network() {
  local uplink_address="1.1.1.1"

  declare -a infra=("core.en1.local" "switch.en1.local" "wap-1.en1.local" "wap-2.en1.local" "wap-3.en1.local" "printer.en1.local")
  declare -a infra_names=("Core Router" "Core Switch" "Wireless AP 1" "Wireless AP 2" "Wireless AP 3" "Printer")

  declare -a lab=("cluster.skylab.enp.one" "pegasus.skylab.enp.one" "saturn.skylab.enp.one" "orion.skylab.enp.one" "iridium.skylab.enp.one" "en2.enp.one")
  declare -a lab_names=("Cluster" "Pegasus" "Saturn" "Orion" "Iridium" "Hubble")

  local output=$(_test_ping "$uplink_address" "UPLINK")
  output+="${NL}";

  output+="${NL}INFRASTRUCTURE~STATE${NL}"
  for (( index=0; index<"${#infra[@]}"; index++ )); do
    output+=$(_test_ping "${infra[$index]}" "${infra_names[$index]}")
    output+="${NL}"
  done

  output+="${NL}HOMELAB~STATE${NL}"
  for (( index=0; index<"${#lab[@]}"; index++ )); do
    output+=$(_test_ping "${lab[$index]}" "${lab_names[$index]}")
    output+="${NL}"
  done

  column -e -t -s '~' <<<"$output"
}

function services() {
  local output="INTERNAL~STATE${NL}"

  output+=$(_test_netcat "cluster.skylab.enp.one" "53" "AdGuard DNS")
  output+="${NL}"
  output+=$(_test_netcat "core.en1.local" "53" "Fallback DNS")
  output+="${NL}"
  output+=$(_test_curl_insecure "https://cluster.skylab.enp.one:8443/status" "Ubiquiti WLC")
  output+="${NL}"

  output+="${NL}PUBLIC~STATE${NL}"

  output+=$(_test_curl_head "https://pms.enp.one/web/index.html" "Plex Media Server")
  output+="${NL}"
  output+=$(_test_netcat "cluster.skylab.enp.one" "25565" "Minecraft Server")
  output+="${NL}"
  output+=$(_test_curl_get "https://vcs.enp.one/api/v1/version" "Version Control")
  output+="${NL}"
  output+=$(_test_curl_get "https://ssv.enp.one/api/alive" "Bitwarden")
  output+="${NL}"
  output+=$(_test_curl_head "https://cdn.enp.one/heartbeat" "Digital Ocean CDN")
  output+="${NL}"
  output+=$(_test_curl_head "https://doc.enp.one/" "Documentation")
  output+="${NL}"
  output+=$(_test_curl_head "https://enpaul.net/" "enpaul.net")
  output+="${NL}"
  output+=$(_test_curl_head "https://allaroundhere.org/" "allaroundhere.org")
  output+="${NL}"
  output+=$(_test_curl_head "https://enp.one/" "enp.one")
  output+="${NL}"

  column -e -t -s'~' <<<"$output"
}

function main() {
  if [[ "$1" =~ ^(-h|--help)$ ]]; then
    usage;
    return 0
  fi
  if [[ "$1" = "--network" ]]; then
    network;
    return 0
  fi
  if [[ "$1" = "--services" ]]; then
    services;
    return 0
  fi
}

if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  main "${@}"
fi
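Usage follows directly from `main()`; output is aligned by `column` using `~` as the
field separator:

```bash
./pingtest.bash --network    # ping infrastructure and homelab hosts
./pingtest.bash --services   # probe internal and public service endpoints
./pingtest.bash --help       # print the usage text
```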
@@ -2,6 +2,9 @@
 - name: Check cluster state
   hosts: cluster
   any_errors_fatal: true
+  pre_tasks:
+    - name: Configure remote execution environment
+      ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
   tasks:
     - name: Validate user input
       run_once: true
@@ -43,10 +46,10 @@
     - name: Set common fact for node addresses
       vars:
         _node_addresses:
-          - "{{ lookup('vars', 'ansible_' + skylab_cluster.interface).ipv4.address }}"
+          - "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address }}"
       ansible.builtin.set_fact:
         _node_addresses: "{{ _node_addresses + [item.address] }}"
-      loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface).ipv4_secondaries }}"
+      loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
       loop_control:
         label: "{{ item.address }}"
 
@@ -59,7 +62,7 @@
       when: inventory_hostname != _target_node
       ansible.builtin.assert:
         that:
-          - skylab_cluster.address | ansible.netcommon.ipaddr('address') in _node_addresses
+          - skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
           - _docker_node_availability | lower == 'active'
         fail_msg: >-
           ERROR: Node '{{ inventory_hostname }}' is already marked as unavailable. All cluster
@@ -115,15 +118,15 @@
 
     - name: Delete address from node
       become: true
-      when: skylab_cluster.address | ansible.netcommon.ipaddr('address') in _node_addresses
+      when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
       ansible.builtin.command:
-        cmd: ip address delete {{ skylab_cluster.address | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface }}
+        cmd: ip address delete {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
       changed_when: true
 
     - name: Assign address to alt node
       delegate_to: "{{ _target_alt }}"
       become: true
-      when: skylab_cluster.address | ansible.netcommon.ipaddr('address') not in hostvars[_target_alt]._node_addresses
+      when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') not in hostvars[_target_alt]._node_addresses
       ansible.builtin.command:
-        cmd: ip address add {{ skylab_cluster.address | ansible.netcommon.ipaddr('host/prefix') }} dev {{ hostvars[_target_alt].skylab_cluster.interface }}
+        cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ hostvars[_target_alt].skylab_cluster.interface.access }}
       changed_when: true
@@ -1,5 +1,5 @@
 ---
-- ansible.builtin.import_playbook: node-down.yaml
+- ansible.builtin.import_playbook: skylab.core.node_down
 
 - name: Shutdown node
   hosts: "{{ node }}"
58  skylab/core/playbooks/node_up.yaml  (new file)
@@ -0,0 +1,58 @@
---
- name: Online nodes
  hosts: cluster
  vars_prompt:
    - name: skylab_datastore_encryption_password
      prompt: Enter datastore block decryption password
      private: true
  pre_tasks:
    - name: Configure remote execution environment
      ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
  roles:
    - role: skylab.core.datastore
  tasks:
    - name: Fetch node swarm ID
      ansible.builtin.command:
        cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
      changed_when: false
      register: _docker_node_id_raw

    - name: Update node availability
      vars:
        ansible_python_interpreter: "{{ skylab_state_dir }}/ansible-runtime/bin/python"
      community.docker.docker_node:
        availability: active
        hostname: "{{ _docker_node_id_raw.stdout.strip() }}"

    - name: Determine node addresses
      vars:
        _node_addresses:
          - "{{ (lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address + '/' + lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"
      ansible.builtin.set_fact:
        _node_addresses: "{{ _node_addresses + [(item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix')] }}"
      loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
      loop_control:
        label: "{{ (item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"

    - name: Determine cluster access addresses
      run_once: true
      vars:
        _cluster_node_ips: []
      ansible.builtin.set_fact:
        _cluster_node_ips: "{{ _cluster_node_ips + [hostvars[item].skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix')] }}"
      loop: "{{ groups.cluster }}"

    - name: Remove alternative node IPs
      become: true
      when: item in _cluster_node_ips and item != (skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix'))
      ansible.builtin.command:
        cmd: ip address delete {{ item | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
      changed_when: true
      loop: "{{ _node_addresses }}"

    - name: Add node IP
      become: true
      when: skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') not in _node_addresses
      ansible.builtin.command:
        cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
      changed_when: true
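Together with the node-down changes above, node maintenance is driven from these two
playbooks; a hedged sketch of the round trip (the host name is illustrative, and
`node` is the variable the shutdown play targets):

```bash
# Drain a node and move its cluster access address to an alternate node
poetry run ansible-playbook skylab.core.node_down --extra-vars node=pegasus

# Bring cluster nodes back online and restore their access addresses
poetry run ansible-playbook skylab.core.node_up
```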
56  skylab/core/playbooks/provision.yaml  (new file)
@@ -0,0 +1,56 @@
---
- name: Group hosts by platform
  hosts: all
  tags:
    - always
  pre_tasks:
    - include_tasks: tasks/meta/runtime-group-determination.yaml


- name: Bootstrap remote ansible environment
  hosts: linux
  gather_facts: false
  tags:
    - always
  tasks:
    - include_tasks: tasks/meta/bootstrap-remote-env.yaml


- name: Configure common settings
  hosts: linux
  gather_facts: false
  tasks:
    - name: Set hostname
      become: true
      ansible.builtin.hostname:
        name: "{{ skylab_hostname | default(inventory_hostname) }}"
        use: systemd

    - name: Disable case-sensitive tab-completion
      become: true
      ansible.builtin.lineinfile:
        line: set completion-ignore-case On
        path: /etc/inputrc
        state: present
        create: true

    - name: Install EPEL repository config
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.yum_repository:
        name: epel
        description: Extra Packages for Enterprise Linux
        baseurl: https://download.fedoraproject.org/pub/epel/$releasever{{ '/Everything' if ansible_distribution_major_version == '8' else '' }}/$basearch/

    - name: Install EPEL GPG key
      when: ansible_distribution == "Rocky"
      become: true
      ansible.builtin.rpm_key:
        state: present
        key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}


- import_playbook: skylab.core.update


- import_playbook: skylab.core.configure
53  skylab/core/playbooks/tasks/meta/bootstrap-remote-env.yaml  (new file)
@@ -0,0 +1,53 @@
---
- name: Install CentOS 8 python bindings
  when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
  become: true
  ansible.builtin.dnf:
    state: present
    name:
      - libffi-devel
      - python3-devel
      - python3-libselinux
      - python3-policycoreutils
      - python3-firewall

- name: Remove legacy state directory
  become: true
  ansible.builtin.file:
    path: /var/run/skylab
    state: absent

- name: Create state directory
  become: true
  ansible.builtin.file:
    path: "{{ skylab_state_dir }}"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0755

- name: Create bootstrap virtualenv
  ansible.builtin.command:
    cmd: "{{ ansible_python_interpreter | default(discovered_interpreter_python) }} -m venv {{ skylab_ansible_venv }} --system-site-packages"
    creates: "{{ skylab_ansible_venv }}/bin/python"

- name: Pin bootstrap virtualenv pip
  ansible.builtin.pip:
    executable: "{{ skylab_ansible_venv }}/bin/pip"
    name: pip
    state: present
    version: "{{ skylab_pip_version }}"

- name: Copy requirements file to remote
  ansible.builtin.copy:
    src: remote-requirements.txt
    dest: "{{ skylab_ansible_venv }}/requirements.txt"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0644

- name: Install remote requirements
  ansible.builtin.pip:
    executable: "{{ skylab_ansible_venv }}/bin/pip"
    requirements: "{{ skylab_ansible_venv }}/requirements.txt"
    state: present
@@ -6,7 +6,7 @@
     key: edgeos
 
 - name: Group supported Linux hosts
-  when: ansible_distribution == "Rocky"
+  when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
   changed_when: false
   group_by:
     key: linux
@@ -0,0 +1,53 @@
---
version: "{{ skylab_compose_version }}"

networks:
  adguard:
    name: adguard
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: "{{ app.networks.ext }}"

volumes:
{% for key, value in app.volumes.items() %}
  adguard-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}

services:
  server:
    image: adguard/adguardhome:{{ app.versions.server }}
    hostname: adguard
    networks:
      - adguard
    dns:
{% for server in app.settings.upstream %}
      - {{ server }}
{% endfor %}
    ports:
      - published: {{ app.ports.53 }}
        target: 53
        protocol: udp
        mode: ingress
      - published: {{ app.ports.53 }}
        target: 53
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8064 }}
        target: 8064
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: adguard-config
        target: /opt/adguardhome/conf
        read_only: false
      - type: volume
        source: adguard-data
        target: /opt/adguardhome/work
        read_only: false
    deploy:
      replicas: 1
214  skylab/core/playbooks/templates/docker-compose/bitwarden.yaml.j2  (new file)
@@ -0,0 +1,214 @@
---
version: "{{ skylab_compose_version }}"


x-global-env: &globalenv
  LOCAL_UID: "{{ _app_account.uid }}"
  LOCAL_GID: "{{ _app_account.uid }}"
  ASPNETCORE_ENVIRONMENT: Production
  globalSettings__selfHosted: "true"
  globalSettings__baseServiceUri__vault: https://{{ app.publish.domain }}
  globalSettings__baseServiceUri__api: https://{{ app.publish.domain }}/api
  globalSettings__baseServiceUri__identity: https://{{ app.publish.domain }}/identity
  globalSettings__baseServiceUri__admin: https://{{ app.publish.domain }}/admin
  globalSettings__baseServiceUri__notifications: https://{{ app.publish.domain }}/notifications
  globalSettings__baseServiceUri__internalNotifications: http://bitwarden_notifications:5000
  globalSettings__baseServiceUri__internalAdmin: http://bitwarden_admin:5000
  globalSettings__baseServiceUri__internalIdentity: http://bitwarden_identity:5000
  globalSettings__baseServiceUri__internalApi: http://bitwarden_api:5000
  globalSettings__baseServiceUri__internalVault: http://bitwarden_web:5000
  globalSettings__pushRelayBaseUri: https://push.bitwarden.com
  globalSettings__installation__identityUri: https://identity.bitwarden.com
  globalSettings__sqlServer__connectionString: "Data Source=tcp:mssql,1433;Initial Catalog=vault;Persist Security Info=False;User ID=sa;Password=e934c0bb-3b5a-4e6b-b525-cd6d83004e1a;MultipleActiveResultSets=False;Connect Timeout=30;Encrypt=True;TrustServerCertificate=True"
  globalSettings__identityServer__certificatePassword: {{ app.settings.certificatePassword }}
  globalSettings__attachment__baseDirectory: /etc/bitwarden/core/attachments
  globalSettings__attachment__baseUrl: https://{{ app.publish.domain }}/attachments
  globalSettings__dataProtection__directory: /etc/bitwarden/core/aspnet-dataprotection
  globalSettings__logDirectory: /etc/bitwarden/logs
  globalSettings__licenseDirectory: /etc/bitwarden/core/licenses
  globalSettings__internalIdentityKey: {{ app.settings.internalIdentityKey }}
  globalSettings__duo__aKey: {{ app.settings.duo__aKey }}
  globalSettings__installation__id: {{ app.settings.installation__id }}
  globalSettings__installation__key: {{ app.settings.installation__key }}
  globalSettings__yubico__clientId: REPLACE
  globalSettings__yubico__key: REPLACE
  globalSettings__mail__replyToEmail: noreply@enp.one
  globalSettings__mail__smtp__host: REPLACE
  globalSettings__mail__smtp__port: "587"
  globalSettings__mail__smtp__ssl: "false"
  globalSettings__mail__smtp__username: REPLACE
  globalSettings__mail__smtp__password: REPLACE
  globalSettings__disableUserRegistration: "false"
  globalSettings__hibpApiKey: REPLACE
  adminSettings__admins: ""


volumes:
{% for key, value in app.volumes.items() %}
  bitwarden-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}

networks:
  bitwarden_internal:
    internal: true
    name: bitwarden_internal
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.internal }}
  bitwarden_external:
    internal: false
    name: bitwarden_external
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.external }}


services:
  mssql:
    image: bitwarden/mssql:{{ app.versions.mssql }}
    stop_grace_period: 60s
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-db-data:/var/opt/mssql/data
      - bitwarden-db-backup:/etc/bitwarden/mssql/backups
      - bitwarden-logs-db:/var/opt/mssql/log
    environment:
      LOCAL_UID: "{{ _app_account.uid }}"
      LOCAL_GID: "{{ _app_account.uid }}"
      ACCEPT_EULA: "Y"
      MSSQL_PID: Express
      SA_PASSWORD: {{ app.settings.SA_PASSWORD }}
    deploy:
      replicas: 1

  web:
    image: bitwarden/web:{{ app.versions.web }}
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-web:/etc/bitwarden/web
    environment: *globalenv
    deploy:
      replicas: 1

  attachments:
    image: bitwarden/attachments:{{ app.versions.attachments }}
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-core:/etc/bitwarden/core
    environment: *globalenv
    deploy:
      replicas: 1

  api:
    image: bitwarden/api:{{ app.versions.api }}
    volumes:
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-api:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  identity:
    image: bitwarden/identity:{{ app.versions.identity }}
    volumes:
      - bitwarden-identity:/etc/bitwarden/identity
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-identity:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  admin:
    image: bitwarden/admin:{{ app.versions.admin }}
    depends_on:
      - mssql
    volumes:
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-admin:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  icons:
    image: bitwarden/icons:{{ app.versions.icons }}
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-icons:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  notifications:
    image: bitwarden/notifications:1.40.0
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-notifications:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  events:
    image: bitwarden/events:{{ app.versions.events }}
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-events:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  nginx:
    image: bitwarden/nginx:{{ app.versions.nginx }}
    depends_on:
      - web
      - admin
      - api
      - identity
    ports:
      - published: {{ app.ports.8080 }}
        target: 8080
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8443 }}
        target: 8443
        protocol: tcp
        mode: ingress
    volumes:
      - bitwarden-nginx-data:/etc/bitwarden/nginx
      - bitwarden-ssl:/etc/ssl
      - bitwarden-logs-nginx:/var/log/nginx
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1
52 skylab/core/playbooks/templates/docker-compose/gitea.yaml.j2 Normal file
@@ -0,0 +1,52 @@
---
version: "{{ skylab_compose_version }}"


networks:
  gitea:
    name: gitea
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  gitea-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}


services:
  server:
    image: gitea/gitea:{{ app.versions.server }}
    hostname: gitea
    networks:
      - gitea
    ports:
      - published: {{ app.ports.3000 }}
        target: 3000
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.22 }}
        target: 22
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: gitea-data
        target: /data
        read_only: false
    environment:
      USER_UID: "{{ _app_account.uid }}"
      USER_GID: "{{ _app_account.uid }}"
      APP_NAME: ENP Version Control System
      RUN_MODE: prod
      ROOT_URL: https://{{ app.publish.domain }}/
      DB_TYPE: sqlite3
      DISABLE_REGISTRATION: "true"
    deploy:
      replicas: 1
99 skylab/core/playbooks/templates/docker-compose/meta.yaml.j2 Normal file
@@ -0,0 +1,99 @@
---
version: "{{ skylab_compose_version }}"


networks:
  meta:
    name: meta
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  meta-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}


services:
  proxy:
    image: nginx:{{ app.versions.proxy }}
    hostname: proxy
    networks:
      - meta
    extra_hosts:
      - "dockerloopback:{{ app.settings.loopback_address }}"
    ports:
      - published: {{ app.ports.80 }}
        target: 80
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.443 }}
        target: 443
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: meta-nginx
        target: /etc/nginx
        read_only: true
      - type: volume
        source: meta-letsencrypt-config
        target: /etc/letsencrypt
        read_only: true
    deploy:
      replicas: 2
      placement:
        max_replicas_per_node: 1

  certbot:
    image: certbot/certbot:{{ app.versions.certbot }}
    hostname: certbot
    command: renew --standalone
    networks:
      - meta
    ports:
      - published: 8088  # This is hardcoded to avoid conflicts
        target: 80
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: meta-letsencrypt-config
        target: /etc/letsencrypt
        read_only: false
      - type: volume
        source: meta-letsencrypt-data
        target: /var/lib/letsencrypt
        read_only: false
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 24h

  backup:
    image: rockylinux:latest
    hostname: backup
    command: bash /datastore/backup/mkbkup.sh /datastore/
    networks:
      - meta
    volumes:
      - type: volume
        source: meta-backup
        target: /datastore/backup
        read_only: false
      - type: volume
        source: meta-appdata
        target: /datastore/appdata
        read_only: true
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: 24h
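Worth noting how renewal fits together: certbot runs `renew --standalone`, answering HTTP-01 challenges on host port 8088 (hardcoded above to avoid conflicts), and the `stack-nginx.conf.j2` template further down forwards ACME traffic to that same port. A sketch of the pairing:

```yaml
# certbot side (above): challenge server published on 8088
ports:
  - published: 8088
    target: 80
# nginx side (stack-nginx.conf.j2 below): challenges proxied to it
#   location ^~ /.well-known/acme-challenge/ {
#       proxy_pass http://dockerloopback:8088/.well-known/acme-challenge/;
#   }
```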
@@ -0,0 +1,55 @@
---
version: "{{ skylab_compose_version }}"


networks:
  minecraft:
    name: minecraft
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  minecraft-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}


services:
  server:
    image: itzg/minecraft-server:latest
    hostname: minecraft
    networks:
      - minecraft
    ports:
      - published: {{ app.ports.25565 }}
        target: 25565
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: minecraft-data
        target: /data
        read_only: false
    environment:
      EULA: "TRUE"
      TZ: Americas/New_York
      VERSION: {{ app.versions.server }}
      MAX_MEMORY: "8G"
      MOTD: "A home for buttery companions"
      MODE: survival
      OPS: {{ app.settings.admins | default([]) | join(',') }}
      WHITELIST: "{{ app.settings.users | default([]) | join(',') }}"
      MAX_BUILD_HEIGHT: "512"
      SNOOPER_ENABLED: "false"
      ICON: https://cdn.enp.one/img/logos/e-w-sm.png
      ENABLE_RCON: "false"
      UID: "{{ _app_account.uid }}"
      GID: "{{ _app_account.uid }}"
    deploy:
      replicas: 1
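The `default([]) | join(',')` filters turn the admin and user lists into the comma-separated strings the image expects, falling back to an empty value when a list is undefined. With the minecraft settings from `vars/services.yaml` below, these render as:

```yaml
OPS: ScifiGeek42
WHITELIST: "ScifiGeek42,fantasycat256,CoffeePug,Snowdude21325,KaiserSJR,glutenfreebean"
```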
@@ -0,0 +1,113 @@
---
version: '3.7'

volumes:
  photoprism-database:
    name: datastore/appdata/photoprism/database
    driver: glusterfs
  photoprism-metadata:
    name: datastore/appdata/photoprism/metadata
  photoprism-originals:
    name: datastore/media/photoprism
    driver: glusterfs
  photoprism-import:
    name: datastore/media/upload
    driver: glusterfs

networks:
  photoprism:
    internal: true
    name: photoprism
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: 192.168.109.0/24

services:
  app:
    image: photoprism/photoprism:latest
    hostname: app
    depends_on:
      - database
    networks:
      - photoprism
    ports:
      - published: 2342
        target: 2342
        protocol: tcp
        mode: ingress
    environment:
      PHOTOPRISM_ADMIN_PASSWORD: "gm2auW34GNawZ8Dqiub8W8vOlvsHCnfj"
      PHOTOPRISM_SITE_URL: "http://cluster.skylab.enp.one:2342/"
      PHOTOPRISM_ORIGINALS_LIMIT: 5000
      PHOTOPRISM_HTTP_COMPRESSION: "gzip"
      PHOTOPRISM_DEBUG: "false"
      PHOTOPRISM_PUBLIC: "false"
      PHOTOPRISM_READONLY: "false"
      PHOTOPRISM_EXPERIMENTAL: "false"
      PHOTOPRISM_DISABLE_CHOWN: "false"
      PHOTOPRISM_DISABLE_WEBDAV: "false"
      PHOTOPRISM_DISABLE_SETTINGS: "false"
      PHOTOPRISM_DISABLE_TENSORFLOW: "false"
      PHOTOPRISM_DISABLE_FACES: "false"
      PHOTOPRISM_DISABLE_CLASSIFICATION: "false"
      PHOTOPRISM_DARKTABLE_PRESETS: "false"
      PHOTOPRISM_DETECT_NSFW: "false"
      PHOTOPRISM_UPLOAD_NSFW: "true"
      PHOTOPRISM_DATABASE_DRIVER: "mysql"
      PHOTOPRISM_DATABASE_SERVER: "database:3306"
      PHOTOPRISM_DATABASE_NAME: "photoprism"
      PHOTOPRISM_DATABASE_USER: "photoprism"
      PHOTOPRISM_DATABASE_PASSWORD: "KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp"  # MariaDB or MySQL database user password
      PHOTOPRISM_SITE_TITLE: "Skylab Images"
      PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
      PHOTOPRISM_SITE_DESCRIPTION: ""
      PHOTOPRISM_SITE_AUTHOR: "EN Paul"
      HOME: "/photoprism"
      PHOTOPRISM_UID: 1408
      PHOTOPRISM_GID: 1408
      ## Hardware video transcoding config (optional)
      # PHOTOPRISM_FFMPEG_BUFFERS: "64"            # FFmpeg capture buffers (default: 32)
      # PHOTOPRISM_FFMPEG_BITRATE: "32"            # FFmpeg encoding bitrate limit in Mbit/s (default: 50)
      # PHOTOPRISM_FFMPEG_ENCODER: "h264_v4l2m2m"  # Use Video4Linux for AVC transcoding (default: libx264)
      # PHOTOPRISM_FFMPEG_ENCODER: "h264_qsv"      # Use Intel Quick Sync Video for AVC transcoding (default: libx264)
      # PHOTOPRISM_INIT: "intel-graphics tensorflow-amd64-avx2"  # Enable TensorFlow AVX2 & Intel Graphics support
      ## Enable TensorFlow AVX2 support for modern Intel CPUs (requires starting the container as root)
      # PHOTOPRISM_INIT: "tensorflow-amd64-avx2"
    user: "1408:1408"
    working_dir: "/photoprism"
    volumes:
      - type: volume
        source: photoprism-originals
        target: /photoprism/originals
        read_only: false
      - type: volume
        source: photoprism-metadata
        target: /photoprism/storage
        read_only: false
      - type: volume
        source: photoprism-import
        target: /photoprism/import
        read_only: true
    deploy:
      replicas: 1

  database:
    image: mariadb:10.6
    hostname: database
    command: mysqld --innodb-buffer-pool-size=128M --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=120
    networks:
      - photoprism
    volumes:
      - type: volume
        source: photoprism-database
        target: /var/lib/mysql
        read_only: false
    environment:
      MYSQL_ROOT_PASSWORD: insecure
      MYSQL_DATABASE: photoprism
      MYSQL_USER: photoprism
      MYSQL_PASSWORD: KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp
    deploy:
      replicas: 1
95 skylab/core/playbooks/templates/docker-compose/plex.yaml.j2 Normal file
@@ -0,0 +1,95 @@
---
version: "{{ skylab_compose_version }}"


networks:
  plex:
    name: plex
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  plex-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}
  plex-data:
    name: plex-data
    driver: local
    driver_opts:
      type: nfs
      o: "addr={{ app.settings.mediastore }},ro"
      device: ":/nfs/plex"


services:
  server:
    image: plexinc/pms-docker:{{ app.versions.server }}
    hostname: plex-media-server
    networks:
      - plex
    ports:
      - published: {{ app.ports.32400 }}
        target: 32400
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.3005 }}
        target: 3005
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8324 }}
        target: 8324
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.32469 }}
        target: 32469
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.1900 }}
        target: 1900
        protocol: udp
        mode: ingress
      - published: {{ app.ports.32410 }}
        target: 32410
        protocol: udp
        mode: ingress
      - published: {{ app.ports.32413 }}
        target: 32413
        protocol: udp
        mode: ingress
      - published: {{ app.ports.32414 }}
        target: 32414
        protocol: udp
        mode: ingress
    volumes:
      - type: volume
        source: plex-config
        target: /config
        read_only: false
      - type: volume
        source: plex-data
        target: /data
        read_only: true
      - type: volume
        source: plex-personal
        target: /personal
        read_only: false
    environment:
      TZ: "Americas/New_York"
      ALLOWED_NETWORKS: {{ app.settings.internal_subnets | join(',') }}
      PLEX_UID: "{{ _app_account.uid }}"
      PLEX_GID: "{{ _app_account.uid }}"
    deploy:
      replicas: 1
      placement:
{% if app.settings.exclude_hosts is defined %}
        constraints:
{% for host in app.settings.exclude_hosts %}
          - node.hostname!={{ host }}
{% endfor %}
{% endif %}
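When `app.settings.exclude_hosts` is defined, the trailing conditional renders one anti-affinity constraint per host. With the plex settings from `vars/services.yaml` below, the placement block comes out as:

```yaml
deploy:
  replicas: 1
  placement:
    constraints:
      - node.hostname!=jupiter.net.enp.one
      - node.hostname!=pegasus.skylab.enp.one
```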
70 skylab/core/playbooks/templates/docker-compose/unifi.yaml.j2 Normal file
@@ -0,0 +1,70 @@
---
version: "{{ skylab_compose_version }}"


networks:
  unifi:
    name: unifi
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  unifi-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}


services:
  wlc:
    image: jacobalberty/unifi:{{ app.versions.wlc }}
    hostname: en1-unifi-wlc
    init: true
    networks:
      - unifi
    ports:
      - published: {{ app.ports.8080 }}
        target: 8080
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8443 }}
        target: 8443
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8843 }}
        target: 8843
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.8880 }}
        target: 8880
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.3478 }}
        target: 3478
        protocol: udp
        mode: ingress
      - published: {{ app.ports.6789 }}
        target: 6789
        protocol: tcp
        mode: ingress
      - published: {{ app.ports.10001 }}
        target: 10001
        protocol: udp
        mode: ingress
    volumes:
      - type: volume
        source: unifi-data
        target: /unifi
        read_only: false
    environment:
      RUNAS_UID0: "false"
      UNIFI_UID: "{{ _app_account.uid }}"
      UNIFI_GID: "{{ _app_account.uid }}"
      TZ: "Americas/New_York"
    deploy:
      replicas: 1
108 skylab/core/playbooks/templates/docker-compose/vikunja.yaml.j2 Normal file
@@ -0,0 +1,108 @@
---
version: "{{ skylab_compose_version }}"


networks:
  vikunja:
    name: vikunja
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ app.networks.ext }}


volumes:
{% for key, value in app.volumes.items() %}
  vikunja-{{ key }}:
    name: datastore{{ value }}
    driver: glusterfs
{% endfor %}


services:
  database:
    image: mariadb:{{ app.versions.database }}
    hostname: database
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    networks:
      - vikunja
    volumes:
      - type: volume
        source: vikunja-database
        target: /var/lib/mysql
        read_only: false
    environment:
      MYSQL_RANDOM_ROOT_PASSWORD: "true"
      MYSQL_USER: vikunja
      MYSQL_PASSWORD: {{ app.settings.database_password }}
      MYSQL_DATABASE: vikunja
    deploy:
      replicas: 1

  cache:
    image: redis:{{ app.versions.cache }}
    hostname: cache
    networks:
      - vikunja
    deploy:
      replicas: 1

  proxy:
    image: nginx:{{ app.versions.proxy }}
    hostname: proxy
    networks:
      - vikunja
    ports:
      - published: {{ app.ports.80 }}
        target: 80
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: vikunja-nginx
        target: /etc/nginx/conf.d
        read_only: true
    deploy:
      replicas: 1

  api:
    image: vikunja/api:{{ app.versions.api }}
    hostname: api
    networks:
      - vikunja
    depends_on:
      - database
      - cache
    volumes:
      - type: volume
        source: vikunja-files
        target: /app/vikunja/files
        read_only: false
    environment:
      VIKUNJA_DATABASE_HOST: database
      VIKUNJA_DATABASE_PASSWORD: {{ app.settings.database_password }}
      VIKUNJA_DATABASE_TYPE: mysql
      VIKUNJA_DATABASE_USER: vikunja
      VIKUNJA_DATABASE_DATABASE: vikunja
      VIKUNJA_REDIS_ENABLED: "1"
      VIKUNJA_REDIS_HOST: cache:6379
      VIKUNJA_CACHE_ENABLED: "1"
      VIKUNJA_CACHE_TYPE: redis
      VIKUNJA_FILES_MAXSIZE: 50MB
    deploy:
      replicas: 1

  web:
    image: vikunja/frontend:{{ app.versions.web }}
    hostname: web
    networks:
      - vikunja
    depends_on:
      - database
      - cache
      - proxy
    environment:
      VIKUNJA_API_URL: https://{{ app.publish.domain }}/api/v1
    deploy:
      replicas: 1
34 skylab/core/playbooks/templates/stack-nginx.conf.j2 Normal file
@@ -0,0 +1,34 @@
# Ansible managed file - do not manually edit
#
server {
    server_name {{ app.publish.domain }};
    root /usr/share/nginx/html;

    location / {
        proxy_pass http://dockerloopback:{{ app.publish.http }}/;
        proxy_set_header Host $host;
    }

    listen 443 ssl;
    ssl_certificate /etc/letsencrypt/live/{{ app.publish.domain }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ app.publish.domain }}/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
}

server {
    listen 80;
    listen [::]:80;
    server_name {{ app.publish.domain }};

    location ^~ /.well-known/acme-challenge/ {
        proxy_pass http://dockerloopback:8088/.well-known/acme-challenge/;
        proxy_set_header Host $host;
    }

    location / {
        return 301 https://$host$request_uri;
    }
}

# EOF
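Rendered against a concrete service from `vars/services.yaml` below (gitea publishes `vcs.enp.one` with HTTP port 3000), the HTTPS server block resolves to:

```
server {
    server_name vcs.enp.one;
    root /usr/share/nginx/html;

    location / {
        proxy_pass http://dockerloopback:3000/;
        proxy_set_header Host $host;
    }
    # TLS directives unchanged except for the substituted domain
}
```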
@@ -7,6 +7,15 @@
     - include_tasks: tasks/meta/runtime-group-determination.yaml
 
 
+- name: Bootstrap remote ansible environment
+  hosts: linux
+  gather_facts: false
+  tags:
+    - always
+  tasks:
+    - include_tasks: tasks/meta/bootstrap-remote-env.yaml
+
+
 - name: Update system
   hosts: linux
   tags:
@@ -15,12 +24,38 @@
     - vars/packages.yaml
   tasks:
     - name: Update system packages via DNF
-      when: ansible_distribution == "Rocky"
+      when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
       become: true
      ansible.builtin.dnf:
         name: "*"
         state: latest
 
+    - name: Install global bash config
+      become: true
+      ansible.builtin.copy:
+        src: global.sh
+        dest: /etc/profile.d/ZZ-skylab-global.sh
+        owner: root
+        group: "{{ ansible_user }}"
+        mode: 0644
+
+    - name: Install universal packages on Rocky
+      when: ansible_distribution == "Rocky"
+      become: true
+      ansible.builtin.dnf:
+        name: "{{ skylab_packages_global + skylab_packages_rocky }}"
+        state: present
+        update_cache: true
+
+    - name: Install universal packages on Fedora
+      when: ansible_distribution == "Fedora"
+      become: true
+      ansible.builtin.dnf:
+        name: "{{ skylab_packages_global + skylab_packages_fedora }}"
+        state: present
+        update_cache: true
 
 
 - name: Update unix accounts
   hosts: linux
   tags:
@@ -49,38 +84,55 @@
         cmd: 'grep "{{ skylab_group.name }}:" /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
       register: _existing_skylab_accounts
 
-    - name: Delete removed user accounts
-      become: true
+    - name: Determine deleted skylab users
+      vars:
+        _deleted_accounts: []
       when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
-      ansible.builtin.user:
-        name: "{{ item }}"
-        state: absent
+      ansible.builtin.set_fact:
+        _deleted_accounts: "{{ _deleted_accounts + [item] }}"
       loop: "{{ _existing_skylab_accounts.stdout_lines }}"
 
-    - name: Delete removed user groups
-      become: true
-      when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
-      ansible.builtin.group:
-        name: "{{ item }}"
-        state: absent
-      loop: "{{ _existing_skylab_accounts.stdout_lines }}"
+    - name: Delete accounts
+      when: _deleted_accounts | default(false)
+      block:
+        - name: Delete removed user accounts
+          become: true
+          ansible.builtin.user:
+            name: "{{ item }}"
+            state: absent
+          loop: "{{ _deleted_accounts }}"
 
-    - name: Delete removed user home directories
-      become: true
-      when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
-      ansible.builtin.file:
-        path: "/home/{{ item }}"
-        state: absent
-      loop: "{{ _existing_skylab_accounts.stdout_lines }}"
+        - name: Delete removed user groups
+          become: true
+          ansible.builtin.group:
+            name: "{{ item }}"
+            state: absent
+          loop: "{{ _deleted_accounts }}"
+
+        - name: Delete removed user home directories
+          become: true
+          ansible.builtin.file:
+            path: "/home/{{ item }}"
+            state: absent
+          loop: "{{ _deleted_accounts }}"
+
+    - name: Determine active users
+      when: item.targets | default([]) | intersect(skylab_targets)
+      vars:
+        _active_accounts: []
+      ansible.builtin.set_fact:
+        _active_accounts: "{{ _active_accounts + [item] }}"
+      loop: "{{ skylab_accounts }}"
+      loop_control:
+        label: "{{ item.uid }},{{ item.name }}"
 
     - name: Create account groups
       when: item.targets | intersect(skylab_targets)
       become: true
       ansible.builtin.group:
         name: "{{ item.name }}"
         gid: "{{ item.uid }}"
         state: present
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
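The `items2dict` filter is what lets the `when:` clause test bare usernames: it converts the `skylab_accounts` list into a name-to-uid mapping, and `in` against a mapping checks its keys. A sketch using two accounts from `vars/access.yaml` below:

```yaml
# skylab_accounts | items2dict(key_name='name', value_name='uid') produces:
#   {enpaul: 1300, ansible: 1400, ...}
# so `item not in (...)` is true exactly when an existing unix account name
# no longer appears in the managed account list.
```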
@@ -88,17 +140,16 @@
       ansible.builtin.set_fact:
         _determined_member_groups: "{{ _determined_member_groups | default({}) | combine({item.name: [
           skylab_group.name,
-          'wheel' if (item.admin | default(false) and ansible_distribution == 'Rocky') else '',
+          'wheel' if (item.admin | default(false) and ansible_os_family == 'RedHat') else '',
           'sudo' if (item.admin | default(false) and ansible_os_family == 'Debian') else '',
           skylab_group_admin.name if item.admin | default(false) else '',
           skylab_group_automation.name if item.service | default(false) else '',
         ]}) }}"
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
 
     - name: Create accounts
-      when: item.targets | intersect(skylab_targets)
       become: true
       ansible.builtin.user:
         name: "{{ item.name }}"
@@ -108,9 +159,13 @@
         groups: "{{ _determined_member_groups[item.name] }}"
         comment: "{{ item.fullname | default('') }}"
         system: "{{ item.service | default(false) }}"
-        generate_ssh_key: false
+        generate_ssh_key: true
+        ssh_key_bits: 4096
+        ssh_key_passphrase: "{{ item.password }}"
+        ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
+        ssh_key_type: ed25519
         password: "{{ item.password }}"
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
@@ -122,17 +177,10 @@
         group: "{{ item.name }}"
         owner: "{{ item.name }}"
         mode: 0700
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
 
-    - name: Enforce root password
-      become: true
-      ansible.builtin.user:
-        name: root
-        password: "{{ skylab_root_password }}"
-        state: present
-
     - name: Create SSH directory
       become: true
       ansible.builtin.file:
@@ -141,31 +189,36 @@
         group: "{{ item.name }}"
         state: directory
         mode: 0700
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
 
     - name: Update authorized keys
       become: true
       when: item.targets | intersect(skylab_targets)
       ansible.builtin.authorized_key:
         user: "{{ item.name }}"
         key: "{{ skylab_ssh_keys[item.name] | join('\n') }}"
         state: present
         exclusive: true
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
 
     - name: Enforce ownership of authorized keys
       become: true
       when: item.targets | intersect(skylab_targets)
       ansible.builtin.file:
         path: /home/{{ item.name }}/.ssh/authorized_keys
         state: file
         owner: "{{ item.name }}"
         group: "{{ item.name }}"
         mode: 0400
-      loop: "{{ skylab_accounts }}"
+      loop: "{{ _active_accounts }}"
       loop_control:
         label: "{{ item.uid }},{{ item.name }}"
 
+    - name: Enforce root password
+      become: true
+      ansible.builtin.user:
+        name: root
+        password: "{{ skylab_root_password }}"
+        state: present
64 skylab/core/playbooks/vars/access.yaml Normal file
@@ -0,0 +1,64 @@
---
skylab_accounts:
  - name: enpaul
    uid: 1300
    fullname: Ethan N. Paul
    targets: [network, datastore, cluster, cloud, workstation]
    admin: true
    password: $6$H7jZEL2Ey93zfMTD$CzUlZkXDudPHgUMU/OFUn8/Yhzo8nBxoSI8thD15toIFlWN.iUfq/Jp5z3KpDCGTxyv.IbRTvE8dOVWjoRfgJ.

  - name: ansible
    uid: 1400
    targets: [network, datastore, cluster, cloud, workstation]
    admin: true
    service: true
    password: $6$qNKmYg4y9YS4f5Gr$m0mAqEVbymPguj.1cS.pfclt33Okfmn1KhFC0r1iQ3eVvz/OIZY3x0qGmPnJ1zOXDWyKKs5hnlGTAeZgCh49C.

  - name: autocraft
    uid: 1401
    service: true
    password: $6$j8aWjPweCWmNT4cZ$F7puAjVES60a5mkBg1WfYMYIGbLrs8FFowf5BPzVo9qgbMYEC53i8rHezDACZjHmOxj5PhJkmZjHD4vfbf3PC1

  - name: autotea
    uid: 1402
    service: true
    password: $6$NdUiEi2P8TI.2jgb$ai1WbWno8QK6Wg/KAV4IacwG92FctN5aQX2i45a9DSCg8k1nkwGj5gc230FXePh8P7vzQ6ifYrYsAvEfZ1J8B.

  - name: autowarden
    uid: 1403
    service: true
    password: $6$a18IBPw40.ELiuy5$RbGfNGUe9iTA6bOaKLsp7q3X3uQ3D1LP8LAF5ioQAafimVvCtwyG.O4Colo9nsl2yoCF6TMIHX9zOTavkbg7W0

  - name: autoguard
    uid: 1404
    service: true
    password: $6$PLf4ifrrk0/5AF21$ohZXy0xDzyUiHXwoCW8zvbrPHFiWSWxYx2.QlDy09wND7RbPhwxghHS0trWWVdy14jAxU45mz5YvdAl7qmEIO0

  - name: autoplex
    uid: 1405
    service: true
    password: $6$VfMCenzm5UPHrpNN$yQIpnQUZPhO9UoSylaOxR6TOrJfR5dFdzdRFzle7dP/bfeDBKxC6hsy52IEowilL.aCbYevz67R9s1hB3q9GU1

  - name: autounifi
    uid: 1406
    service: true
    password: $6$ScrKQGmAifGVDovx$wuQQhEdNya8Tjj/oTeY/eT1grLl17hSYbVeWIIqU.jv.W9vFyoVkxeE/lBcPvBe8XdGjOxWu32WsnomL8irb11

  - name: autollama
    uid: 1407
    service: true
    password: $6$lEdCBbqlWIdHuRZZ$Pr9SAybk7uCTfzjtCpSe7RrwM2TKqr8vWtLDARZRQ9e1RpNKHP2bEvkeg2VPc7oACVfxbg7Y8PP0rKOR.3fcD.

skylab_group:
  name: skylab
  gid: 1200

skylab_group_admin:
  name: skylab_admin
  gid: 1201

skylab_group_automation:
  name: skylab_auto
  gid: 1202

skylab_root_password: $6$FDwVi2DUVPg.LSrC$vRMIW6ah0x5cSZFLDrV2FuiwoUtYgcnJJV06gn2HxLsUnkXJ0/Sv1hjRn8v6bZy1AmkDCyQCtT6DHRRBuQspx.
@@ -6,6 +6,7 @@ skylab_packages_global:
   - gcc
   - gcc-c++
   - git
+  - jq
   - make
   - nano
   - openssl-devel
@@ -26,3 +27,9 @@ skylab_packages_rocky:
   - python3-virtualenv
   - systemd-networkd
   - wget
+
+skylab_packages_fedora:
+  - bind-utils
+  - nc
+  - nfs-utils
+  - wget
240 skylab/core/playbooks/vars/services.yaml Normal file
@@ -0,0 +1,240 @@
---
skylab_services:
  meta:
    networks:
      ext: 192.168.99.0/24
    volumes:
      nginx: /appdata/nginx
      letsencrypt-config: /appdata/letsencrypt/config
      letsencrypt-data: /appdata/letsencrypt/data
    ports:
      80: 80
      443: 443
    versions:
      proxy: latest
      certbot: latest
    settings:
      loopback_address: 192.168.255.255

  minecraft:
    user: autocraft
    networks:
      ext: 192.168.102.0/24
    volumes:
      data: /appdata/minecraft
    ports:
      25565: 25565
    versions:
      server: 1.16.5
    publish:
      domain: mcs.enp.one
    settings:
      admins:
        - ScifiGeek42
      users:
        - ScifiGeek42
        - fantasycat256
        - CoffeePug
        - Snowdude21325
        - KaiserSJR
        - glutenfreebean

  gitea:
    user: autotea
    networks:
      ext: 192.168.103.0/24
    volumes:
      data: /appdata/gitea
    ports:
      3000: 3000
      22: 2222
    publish:
      domain: vcs.enp.one
      http: 3000
    versions:
      server: 1.15.4

  bitwarden:
    user: autowarden
    networks:
      internal: 192.168.104.0/24
      external: 192.168.105.0/24
    volumes:
      db-data: /appdata/bitwarden/mssql/data
      db-backup: /appdata/bitwarden/mssql/backup
      nginx-data: /appdata/bitwarden/nginx
      web: /appdata/bitwarden/web
      ssl: /appdata/bitwarden/ssl
      ca-certs: /appdata/bitwarden/ca-certificates
      core: /appdata/bitwarden/core
      identity: /appdata/bitwarden/identity
      logs-api: /appdata/bitwarden/logs/api
      logs-db: /appdata/bitwarden/logs/mssql
      logs-identity: /appdata/bitwarden/logs/identity
      logs-nginx: /appdata/bitwarden/logs/nginx
      logs-admin: /appdata/bitwarden/logs/admin
      logs-icons: /appdata/bitwarden/logs/icons
      logs-notifications: /appdata/bitwarden/logs/notifications
      logs-events: /appdata/bitwarden/logs/events
    ports:
      8080: 8090
      8443: 8943
    versions:
      mssql: 1.40.0
      web: 2.19.0
      attachments: 1.40.0
      api: 1.40.0
      identity: 1.40.0
      admin: 1.40.0
      icons: 1.40.0
      events: 1.40.0
      nginx: 1.40.0
    publish:
      domain: ssv.enp.one
      http: 8090
    settings:
      certificatePassword: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        34336462333965626665636664636338353139306135393862656539623935666134666638313632
        6337393734353237373233663763666566316637393436650a346134353365626637313732346565
        64373866633430613637663230383866336362313739313335646330373666353536396463376364
        3730306338623831300a346565613730326138333732306237333236393237653363386263376531
        30653663346234383538316337386534356534316437323561646637636361396462393335316233
        3931623037626539636535353963666635316334613833396437
      internalIdentityKey: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        64343365323264303635306461386464626535343138333637333035343365386138363261666561
        3036376532316230326238626662663434343131393336350a363230333637373231333332356230
        66383466626139396365333865663538386130633136643861353936613330613535313363323639
        6538656632376330380a373534393361613234366536353866353366646263643565346534393235
        30623261626364613063353839663130656436316531666431316332653330636436323331316462
        3539383064363338313433343837363563313838333231363639
      duo__aKey: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        38353861643436373461393663616366383139393164366664303333333431663364613530323532
        3434643335353964613464393734623934313164663339340a303831353734623332316464333735
        34343961393562366435653935313038336638623061353761343538333264386638306363386463
        3339346561333039650a353163633263386232646366323637383866303033356631376639383561
        36316333336434393364316565353363623036613233633933616532376530653138366432303762
        6532343435636261353434323461646365396538646466353032
      installation__id: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        62323837366638363735393462326566633235356261326636623239366462316465636163663063
        3065613765386138653239383332306363346236666662660a313634333334396633646465356435
        66666231633938613838663364323331666434383439303931393761313563663931386532336330
        6433383331643933610a323565636462663865666435376334346535323964663264363039623364
        32653966363634376534383664663535373830366466336463613365653463363663316165303330
        3834653063653334313931643330663163386638363938643130
      installation__key: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        38353130336136623437653131316461653561393539373630623135383036643135623361613735
        6431306133623866613836363361376163656434343230660a663635393861333863376461336661
        30386562353730326665323030393531663234373430363639306562633031363065386665646431
        3163633239366630300a313436386131376433333231346135393735373236626365393533626232
        61313536323437363234396536623662613434333363326565303939363562353732
      SA_PASSWORD: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        64313236346631366338313139396532346461333835616466313037363132656632323566663138
        6665393239656262363261303362303437343438626234340a663836623362353431373035356562
        61383865303835323336363862303035363161376336346563323966633361333966363232393665
        6166323331353065380a616138303531643063653633656561383761393433646130656432363436
        62383763316130306235396338356236636263653830666139663064626633643635386237373034
        3465323836373437383465316537666337373134616135626238

  adguard:
    user: autoguard
    networks:
      ext: 192.168.108.0/24
    volumes:
      config: /appdata/adguard/config
      data: /appdata/adguard/data
    ports:
      53: 53
      8064: 8084
    versions:
      server: v0.106.3
    publish:
      domain: adguard.en1.local
      http: 8064
    settings:
      upstream:
        - 1.1.1.1
        - 1.0.0.1

  plex:
    user: autoplex
    networks:
      ext: 192.168.101.0/24
    volumes:
      config: /appdata/plex
    ports:
      32400: 32400
      3005: 3005
      8324: 8324
      32469: 32469
      1900: 1900
      32410: 32410
      32413: 32413
      32414: 32414
    versions:
      server: latest
    publish:
      domain: pms.enp.one
      http: 32400
    settings:
      mediastore: mediastore.skylab.enp.one
      internal_subnets:
        - 10.42.100.0/24
        - 10.42.101.0/24
      exclude_hosts:
        - jupiter.net.enp.one
        - pegasus.skylab.enp.one

  unifi:
    user: autounifi
    networks:
      ext: 192.168.100.0/24
    volumes:
      data: /appdata/unifi
    ports:
      8080: 8080
      8443: 8443
      8843: 8843
      8880: 8880
      3478: 3478
      6789: 6789
      10001: 10001
    versions:
      wlc: "6.2"
    publish:
      domain: unifi.en1.local
      http: 8080

  vikunja:
    user: autollama
    networks:
      ext: 192.168.107.0/24
    volumes:
      database: /appdata/vikunja/database
      files: /appdata/vikunja/files
      nginx: /appdata/vikunja/nginx
    ports:
      80: 8087
    versions:
      database: "10"
      cache: latest
      proxy: latest
      api: 0.18.1
      web: 0.18.1
    publish:
      domain: org.enp.one
      http: 8087
    settings:
      database_password: !vault |
        $ANSIBLE_VAULT;1.1;AES256
        35313866386161376430383232343834633566363136323761316531663633383231653135313565
        6332336461356164623237306436393131383566656233640a316262616161336331356565363963
        35313430303237313039346162653564623236373564306333393362623134613437656231633635
        6334616138663036610a646234366264646363353635356338633035373166343763353733336339
        38663937383165386530326138363965626666386366636330343133633238636236316432613136
        6662313533316563646461646336396430306466323831613730
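The `!vault |` scalars above are inline-encrypted values that decrypt transparently when the playbooks run with the vault password. Values like these are produced with the standard vault CLI; a sketch (the variable name and secret are placeholders):

```yaml
# ansible-vault encrypt_string 'hunter2' --name 'database_password'
# emits a tagged scalar ready to paste into a vars file:
database_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  # ...ciphertext lines elided in this sketch...
```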
@@ -2,8 +2,8 @@
 skylab_ssh_keys:
   enpaul:
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDi9rWRC15og/+3Jc3AMHOrlIc2BHaAX9PLbklz3qfFtSOp9bIScMcH5ZR6lyVSgP8RCEjI5HuZejDUJTXUQYfvEJeno//vmxn9Vw66lpMz/FSJ3JcDbjDVI1pe3A8mTOAS+AoVOEzCUOJVZJvdI272Hgf+QRBu+ONQ12u+2XYdVfLFDe7mAG+vEJRBatwb8B7Al+/LUpIrCuPm9PzMBtCMFjWGaqQgnyJYRSPIGxz9231XIjwhHLOQG1R0jLGuS37X+J49Y5JYDaHf9q9KH76GjdO2rOq6aGvwN93Y4Z+D2hMOklhD0Ez/ZE+I3ZUPV0e5pF28gsA6L7gTeqmSGpQaKdwjCUoU12VM70OVxng5p2+7DIc0k2np7rnvd4zybgn9OMM+TIO5M3c6ocDuNsEmRgfS3V99X5oh9qNy35UdBXV08j0wFoUo1KcyGwyNBYzKzvkkvtgJezVKmqSPKeBjMgMX4UsJsMn27Zosk0ZgoUwLFPO9Pg7uShncwgsTnvYDR1ws53PV832gc7A85ud/dC9Fjn6jBsMQaCFbiZktc5J8mv3cugQHQesbq8Y2aNVRu+ECb+KUvAEdPacWdBOkk0IvZ4PvLrAs2xehF6FYVqKVtPlJMaUAAwj9vVx7Nl2HnsSRIrCgxsMOTOhbbp/3NrvM8r6K7zKBzXg2TNgQeQ== enpaul@ph-1
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8K74AXz6LBYfoXRbY1pKP+1rvi5BpjFOkg1uYuMM+q78toagSREpLrbjvPtoL8fkEFox4T9TUaME08ZP8560R6iRk35extTTiXquflEwvpsF+e9Lv8E712s/1ydJpkYoux1OohE4W5D9DjVMEW1tjXeb+9aDUcVml6YKMpKLpjEIVanyjHMN13XgswKZGoK3mVMnWxE36fbGVVfiFCvMr/BpjqGShRCxmvldzgq76i1BpTKi6omOjvpgRQcUJcDhYcHAUVSlNccgGLmlAPiUillA//pk84yczzH1dQBB6571Ab5ldoUDBU/hJ0W27aeOfrepup4hNuUt2oux+zAn+uetAuAWKU2Kd0Oo6L5IKObbAQLI0CXfyrmHlrYXwTyNMFleeOdw7s9lf2ra3YCYVXfMALdE6pp+HJLBxzg9kMBbTp6zkV7ZKi75AQhyBJA0s4+vRUccBtJit3Tls+aw/3rd9Dt9lLaXkE80khoKsUI0eiuXtPDlurGxkpcTe6Al/lChNA19pdKEUsBmhD6UUvMTYWlApmta/+xf0wDsSyHqJcnIGx8Qdtg3c4j1Ch+WXwbdOwk8XJrL0atWmv2ium1ll/arO2NFBmbwG1LG/lzJ1k/DoAiHrKrb1HdlwDk0O/7xF/zyC2mfVZKO36+g4XlA7wDJc0tB5vIymlEy2rw== enpaul@serico-nox
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHUXG01NQ7IahqBwLx+lqvbnhU5Cynt+Y4xCk2u5pwjU2KwKyRfqc5LUROU4mcFLuA1mP9uoMaYmiJ/rIWvM/A9OVplilCoSKXxWJW09fR/rOPmCn7pOR+CpFXBNOiRR0WoiDeu1D0tifbSuK/qOxKy6Lp9MH20Ma46d89xA57L9LX+B5CrwF9fR2FXIQGojCiFFyByaUgmzuDMi5mCafm3XlqaR1/wcqoA1YwqFFiGR3gVylSbOmB/Q4GnnyLpBbcYAc5AQnbnD4LlM5biEsLNy7vtQj9s0SeloUkzsJ112dNozdwTI/tOWbINVM+o3AH4B2baTQayWK/UrG9sivjHgEz5Jk5A4xAbUWC1MrH7WHo7vevHu4AT+DiPLkmHli9Ztu0DqJuenheJDyRLfWwDPvIpoY9/AsbVZ/UXqRVbfIB4jV00IHneEg4zj0AdWbSHDz55BZ23JItpuU4i37cO9Cbo2tQYqZjgM5VAlZXwhNPUF7pxWJJxGFqiB5MTQM0LZcrmXpToxBPa0BSDmIcjvLP6NQWk1u+Fdjunyx/q9Gmlc3vFtFEz7swWCuKp4DavyUXFeWwSKt4dDRZyPPdSYrKxDPCncSaKeCv+G5sx4RyQLJjpx14tisnnZP0O5b5S6j3PfGzjgnNBhzl/xIzM5moUqPF7R2laOKh9CBdoQ== enpaul@vigil-nox
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8K74AXz6LBYfoXRbY1pKP+1rvi5BpjFOkg1uYuMM+q78toagSREpLrbjvPtoL8fkEFox4T9TUaME08ZP8560R6iRk35extTTiXquflEwvpsF+e9Lv8E712s/1ydJpkYoux1OohE4W5D9DjVMEW1tjXeb+9aDUcVml6YKMpKLpjEIVanyjHMN13XgswKZGoK3mVMnWxE36fbGVVfiFCvMr/BpjqGShRCxmvldzgq76i1BpTKi6omOjvpgRQcUJcDhYcHAUVSlNccgGLmlAPiUillA//pk84yczzH1dQBB6571Ab5ldoUDBU/hJ0W27aeOfrepup4hNuUt2oux+zAn+uetAuAWKU2Kd0Oo6L5IKObbAQLI0CXfyrmHlrYXwTyNMFleeOdw7s9lf2ra3YCYVXfMALdE6pp+HJLBxzg9kMBbTp6zkV7ZKi75AQhyBJA0s4+vRUccBtJit3Tls+aw/3rd9Dt9lLaXkE80khoKsUI0eiuXtPDlurGxkpcTe6Al/lChNA19pdKEUsBmhD6UUvMTYWlApmta/+xf0wDsSyHqJcnIGx8Qdtg3c4j1Ch+WXwbdOwk8XJrL0atWmv2ium1ll/arO2NFBmbwG1LG/lzJ1k/DoAiHrKrb1HdlwDk0O/7xF/zyC2mfVZKO36+g4XlA7wDJc0tB5vIymlEy2rw== enpaul@discovery
     - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ3ron1rnTp4t9iyB2VGY5jNuOuJcOgZD3KewjPqOijA enpaul@voyager
   ansible:
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDN/E/zjwCruZf+mboBeBAtNFBInWMvdm6TjwpIg6jVoGgRmYQxEfIcqDSfBTQQ0gbyE3udTdTUDRLRu7ADktzL9J0IepEaYARfz8SANS/Hx2MR0NNy8DArrSFJCOillDA7E7UmNPIPeQl0O76p2ZjEDy6qZnTiW8eOtD7LCzJp4eGJanUPFhag8f4aSNbmiHGR25Zuk82w2/+KrqiI3gO0+jNlnPBf+XHNnFbtUIroupRfxgLdk1OahmkWHTSHkDtXiYrWIISarrHCgVqHTHo1KIX5+MPOH4S5VLB1kaY/O7+g/XlFrAciw8m0zjyBq0ILb+YTSrL9PYnSBtnHAVGJv2bB+TgCfF/nhQGqoqBqqQHFnX0y3JygmDTJMO+aE5wlvI5Laki7EHYPU4fL+Ge76l/dG9j2anw4/iHklbfk1UOxnLvJl593GAlILg1Kd8xx9VfYzVZ7GZym2zq3NI4uQ77T1H4iGoE67zarkn3peKacjX/KARq4weVvs3irHIHibnIuh/TGcS4eiQoNdPxsSA2wRKB6jeuXiV65F1rUDNGs80wcJmsAbZN8/u9Tt0o/Xc+L/LVhV0yrSeBUxzXtlaS+RfcteBXByO3xfC112Cj5grKVki5xWN9AY42Y6JhT3OyiO33dKUMEF/KfiEWWAfvQr/t1CI/rdcEbv3pyUw== enpaul@serico-nox-ansible
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDIbXaUzVqDDKDSC1iO/nOmxIcJoOoMvXE+CJRobEdpkkYSBmlPfburJvGMDMQ3O/88OfgBrG5S7HKlbgVPGEII0Vpsk5iwzOk5Tmo03nLz02Ilx1xXYxTrjUSwnexzbHpluHmqunKEIUVTMHpDz2m4UPgZ4ECsGp9/6n6+n//uLeJ4fQUO9x4L+VMbpDrtfpKN7/P6U30XBIb9bZuKznVPtqTmCy/BFkxTkIn9QKqDh5d49FY/xkOjy1K9zTWb78DFzBRf4sGEykrp19N6inL0eRstGSZAKhqL+qdRXOy/7n6l0u+CdXWl9ZFVXRFhVdAhYOgkEvtuqoasK1Fk3OMqP6SflFanuDiFBostfgfrf8SUV+7CFvOuSpEWgTqx/jPFZV4Vr6wx5ZFVs02OzZ6TJFaEHaLvOE/R3iLOiuFcvqVNpvstLiyiigsj1+DwhDJcwOr3DaEsNdUbv0BTI2P03wtHJtBQw5CaVr5zCBDEeUsL0bBVQdq+6d0NT+CPJNxSZlTmmrBBbgkpupxdnmX6VVBYfXnylsE8UZxY1d7yxba3+Wzp2yvlr2MVocwQmMTPEqimIsW0hsQ8iXi1nrDXecSojlDAeu+LBFuaCxO8H59GrrVWVTI2dAPLEcP+stNGLHqKZuh62t5TnmxuMMi0SY6jH7KiKmusD4fYafzrlQ== enpaul@vigil-nox-ansible
     - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP5TGKururOa1Y+cbv8AWXYI5zhfZCDV0fsBG+33IYUc enpaul@ansible.voyager
     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDN/E/zjwCruZf+mboBeBAtNFBInWMvdm6TjwpIg6jVoGgRmYQxEfIcqDSfBTQQ0gbyE3udTdTUDRLRu7ADktzL9J0IepEaYARfz8SANS/Hx2MR0NNy8DArrSFJCOillDA7E7UmNPIPeQl0O76p2ZjEDy6qZnTiW8eOtD7LCzJp4eGJanUPFhag8f4aSNbmiHGR25Zuk82w2/+KrqiI3gO0+jNlnPBf+XHNnFbtUIroupRfxgLdk1OahmkWHTSHkDtXiYrWIISarrHCgVqHTHo1KIX5+MPOH4S5VLB1kaY/O7+g/XlFrAciw8m0zjyBq0ILb+YTSrL9PYnSBtnHAVGJv2bB+TgCfF/nhQGqoqBqqQHFnX0y3JygmDTJMO+aE5wlvI5Laki7EHYPU4fL+Ge76l/dG9j2anw4/iHklbfk1UOxnLvJl593GAlILg1Kd8xx9VfYzVZ7GZym2zq3NI4uQ77T1H4iGoE67zarkn3peKacjX/KARq4weVvs3irHIHibnIuh/TGcS4eiQoNdPxsSA2wRKB6jeuXiV65F1rUDNGs80wcJmsAbZN8/u9Tt0o/Xc+L/LVhV0yrSeBUxzXtlaS+RfcteBXByO3xfC112Cj5grKVki5xWN9AY42Y6JhT3OyiO33dKUMEF/KfiEWWAfvQr/t1CI/rdcEbv3pyUw== enpaul@ansible.discovery
1318 skylab/core/roles/dashboard/files/grafana.ini Normal file
File diff suppressed because it is too large
9 skylab/core/roles/dashboard/files/grafana.repo Normal file
@@ -0,0 +1,9 @@
[grafana]
name=grafana
baseurl=https://packages.grafana.com/enterprise/rpm
repo_gpgcheck=1
enabled=1
gpgcheck=1
gpgkey=https://packages.grafana.com/gpg.key
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
22 skylab/core/roles/dashboard/files/ssl-options.conf Normal file
@@ -0,0 +1,22 @@
# Ansible managed file - DO NOT EDIT
#
# https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-16-04
#

ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 valid=300s;
resolver_timeout 5s;
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;

ssl_dhparam /etc/nginx/ssl-dhparam.pem;

# EOF
12 skylab/core/roles/dashboard/handlers/main.yaml Normal file
@@ -0,0 +1,12 @@
---
- name: restart-nginx
  become: true
  ansible.builtin.systemd:
    name: nginx
    state: restarted

- name: restart-grafana
  become: true
  ansible.builtin.systemd:
    name: grafana-server
    state: restarted
48 skylab/core/roles/dashboard/tasks/grafana.yaml Normal file
@@ -0,0 +1,48 @@
---
- name: Install Grafana Enterprise repository
  become: true
  ansible.builtin.copy:
    src: grafana.repo
    dest: /etc/yum.repos.d/grafana.repo
    owner: root
    group: "{{ ansible_user }}"
    mode: 0644
  register: _grafana_repo

- name: Install Grafana repository GPG key
  become: true
  ansible.builtin.rpm_key:
    state: present
    key: https://packages.grafana.com/gpg.key

- name: Install Grafana
  become: true
  ansible.builtin.dnf:
    name: grafana
    state: present
    update_cache: "{{ _grafana_repo.changed }}"

- name: Enable and start Grafana
  become: true
  ansible.builtin.systemd:
    name: grafana-server
    state: started
    enabled: true

- name: Fetch installed grafana plugins
  become: true
  ansible.builtin.command:
    cmd: grafana-cli plugins ls
  changed_when: false
  register: _grafana_plugins_raw

- name: Install plugins
  become: true
  ansible.builtin.command:
    cmd: grafana-cli plugins install {{ item }}
  changed_when: item not in _grafana_plugins_raw.stdout
  notify: [restart-grafana]
  loop:
    - marcusolsson-json-datasource
    - grafana-clock-panel
    - ayoungprogrammer-finance-datasource
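`grafana-cli plugins install` is not idempotent on its own, so the role approximates it: the previous task snapshots `grafana-cli plugins ls`, and `changed_when` reports a change only when the plugin name is missing from that snapshot. The same two-step pattern in miniature:

```yaml
# 1. Gather state once, never reporting "changed"
- ansible.builtin.command:
    cmd: grafana-cli plugins ls
  changed_when: false
  register: _grafana_plugins_raw

# 2. Act (the command still runs), but only claim a change when the
#    plugin was absent from the gathered state
- ansible.builtin.command:
    cmd: grafana-cli plugins install grafana-clock-panel
  changed_when: "'grafana-clock-panel' not in _grafana_plugins_raw.stdout"
```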
6 skylab/core/roles/dashboard/tasks/main.yaml Normal file
@@ -0,0 +1,6 @@
---
- name: Install and configure Grafana
  ansible.builtin.import_tasks: grafana.yaml

- name: Install and configure Nginx
  ansible.builtin.import_tasks: nginx.yaml
107 skylab/core/roles/dashboard/tasks/nginx.yaml Normal file
@@ -0,0 +1,107 @@
---
- name: Install nginx
  become: true
  ansible.builtin.dnf:
    name: nginx
    state: present

- name: Enable and start nginx
  become: true
  ansible.builtin.systemd:
    name: nginx
    state: started
    enabled: true

- name: Configure firewall for Nginx
  become: true
  ansible.posix.firewalld:
    service: "{{ item }}"
    state: enabled
    zone: internal
    permanent: true
    immediate: true
  loop:
    - http
    - https

- name: Configure SELinux for Nginx
  when: ansible_selinux.status | default("") == "enabled"
  become: true
  ansible.posix.seboolean:
    name: httpd_can_network_connect
    state: true
    persistent: true
  notify: [restart-nginx]

- name: Create certificate directory
  become: true
  ansible.builtin.file:
    path: "{{ dashboard_certificate_directory }}"
    state: directory
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0570

- name: Generate X509 private key
  become: true
  vars:
    ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
  community.crypto.openssl_privatekey:
    path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
    type: RSA
    size: 8192
    passphrase: "{{ dashboard_certificate_password }}"
    cipher: auto
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0460

- name: Install private key password file
  become: true
  ansible.builtin.copy:
    content: "{{ dashboard_certificate_password }}"
    dest: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password"
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0460

- name: Create self-signed certificate
  become: true
  vars:
    ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
  community.crypto.x509_certificate:
    path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem"
    privatekey_path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
    privatekey_passphrase: "{{ dashboard_certificate_password }}"
    provider: selfsigned
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0460
  notify: [restart-nginx]

- name: Copy nginx SSL parameters
  become: true
  ansible.builtin.copy:
    src: ssl-options.conf
    dest: /etc/nginx/ssl-options.conf
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0664
  notify: [restart-nginx]

- name: Export Diffie-Hellman parameters
  become: true
  ansible.builtin.command:
    cmd: openssl dhparam -out /etc/nginx/ssl-dhparam.pem 2048
    creates: /etc/nginx/ssl-dhparam.pem
  notify: [restart-nginx]

- name: Configure nginx server
  become: true
  ansible.builtin.template:
    src: nginx.conf.j2
    dest: /etc/nginx/conf.d/{{ dashboard_hostname }}.conf
    owner: nginx
    group: "{{ ansible_user }}"
    mode: 0444
  notify: [restart-nginx]
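The dhparam task uses the other common idempotency guard: where the grafana plugin tasks above only adjust change reporting, `creates:` skips the command entirely once the output file exists. In sketch form:

```yaml
- ansible.builtin.command:
    cmd: openssl dhparam -out /etc/nginx/ssl-dhparam.pem 2048
    creates: /etc/nginx/ssl-dhparam.pem  # skipped once this path exists
```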
29 skylab/core/roles/dashboard/templates/nginx.conf.j2 Normal file
@@ -0,0 +1,29 @@
# Ansible managed file - DO NOT MANUALLY EDIT
#
server {
    server_name {{ dashboard_hostname }};
    root /usr/share/nginx/html;

    location / {
        proxy_pass http://127.0.0.1:3000/;
        proxy_set_header Host $host;
    }

    listen 443 ssl http2;
    ssl_certificate {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem;
    ssl_certificate_key {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key;
    ssl_password_file {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password;
    include /etc/nginx/ssl-options.conf;
}

server {
    if ($host = {{ dashboard_hostname }}) {
        return 301 https://$host$request_uri;
    }

    server_name {{ dashboard_hostname }};
    listen 80;
    return 404;
}
#
# EOF
15 skylab/core/roles/dashboard/vars/main.yaml Normal file
@@ -0,0 +1,15 @@
---
dashboard_certificate_directory: /etc/nginx/certs
dashboard_certificate_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  62373265623036656632396637363539313437656433656461356561393538333536303961363462
  3964353831633165363430313533623563343732623930630a393030393336613563313431306233
  62393235303234336365313138633137663430653061343737616466303136616130643061356566
  3165313038393163340a396365643335343332333335363539326635633466313264373639353930
  36646462396139346432353233646635303031613639323266366235373132346363653431323666
  38336365303431646530613030613437663035613332653865366432636238303437323633666239
  64366435353762656362666531393865383639343461616365316634326334623733653664666161
  63366234646466326531363666633966326462373562313839393731633931383762306663396562
  65663031653661333439373461333234613863623364643464323863656630386561316565353232
  35313338373631356231376361346662353365373030653965626434336339613936656138656637
  666430306334623563306236616663623438
67 skylab/core/roles/datastore/meta/argument_specs.yaml Normal file
@@ -0,0 +1,67 @@
---
argument_specs:
  main:
    description: >-
      This role makes several assumptions about the local storage configuration of the server:

      1. There is one block device on the server that will be used for data storage
      2. That block device will be joined to a glusterfs volume
      3. The block device is encrypted with LUKS

      This role mostly serves to perform housekeeping tasks and validation of expected configs.
      Automating disk configuration seems like a really good way to lose all my data, so I decided
      to leave that to the much more reliable manual configuration for the time being.

      To that end, here is a quick cheatsheet of commands that might be useful in setting up
      storage device(s) for this role (replace `DEVICE` with the block device for storage):

      ```bash
      # Encrypt a block device, provide encryption key when prompted
      cryptsetup luksFormat --type luks2 /dev/DEVICE

      # Unlock encrypted block device and mount under a mapper
      cryptsetup luksOpen /dev/DEVICE LABEL

      # Lock an encrypted block device
      cryptsetup luksClose LABEL

      # Create and format a partition on the encrypted block device
      mkfs.xfs /dev/mapper/LABEL -L LABEL

      # Run from an existing server already in the gluster pool
      # Add server to the gluster pool
      gluster peer probe HOSTNAME

      # To replace a brick from an already offline'd node, the old brick first needs to be force
      # removed, replication reduced, and (if arbiter is enabled) any arbiter nodes removed
      #
      # Remove arbiter brick
      gluster volume remove-brick VOLUME replica 2 HOSTNAME:/EXPORT force
      # Remove dead data brick
      gluster volume remove-brick VOLUME replica 1 HOSTNAME:/EXPORT force
      # Remove dead node
      gluster peer detach HOSTNAME
      # Add new data brick
      gluster volume add-brick VOLUME replica 2 HOSTNAME:/EXPORT start
      #
      # To re-add the arbiter you might need to clean up the `.glusterfs` directory and remove
      # directory parameters from the old brick. These next commands need to be run on the host
      # with the arbiter brick physically attached
      #
      rm -rf /EXPORT/.glusterfs
      setfattr -x trusted.gfid /EXPORT
      setfattr -x trusted.glusterfs.volume-id /EXPORT
      # Re-add arbiter brick
      gluster volume add-brick VOLUME replica 3 arbiter 1 HOSTNAME:/EXPORT
      # Trigger a resync
      gluster volume heal datastore

      # General gluster debug info
      gluster volume info VOLUME
      gluster volume status VOLUME
      ```
    options:
      skylab_datastore_device:
        description: The block device under `/dev/` that should be configured as datastore storage
        type: str
        required: true
52 skylab/core/roles/datastore/tasks/gluster.yaml Normal file
@@ -0,0 +1,52 @@
---
- name: Allow gluster through firewall
  become: true
  ansible.posix.firewalld:
    service: glusterfs
    state: enabled
    zone: trusted
    immediate: true
    permanent: true

- name: Create datastore directory
  become: true
  ansible.builtin.file:
    path: /mnt/brick/datastore
    state: directory

- name: Start and disable glusterd
  become: true
  ansible.builtin.systemd:
    name: glusterd
    state: started
    enabled: false

- name: Fetch peer status
  become: true
  ansible.builtin.command:
    cmd: gluster peer status
  changed_when: false
  register: _gluster_peer_status_raw

# The first line of `gluster peer status` output is "Number of Peers: N";
# fail when the count is 0, i.e. the host is not in the pool
- name: Check peer status
  ansible.builtin.assert:
    that:
      - not _gluster_peer_status_raw.stdout_lines[0].strip().endswith('0')
    fail_msg: >-
      ERROR: Datastore host '{{ inventory_hostname }}' is not joined to the gluster pool. Run the
      command 'gluster peer probe {{ inventory_hostname }}.local' from another datastore host to
      add it.
    success_msg: >-
      Datastore host {{ inventory_hostname }} is joined to the gluster pool

- name: Mount gluster volume
  become: true
  ansible.posix.mount:
    path: /mnt/datastore
    src: localhost:/datastore
    state: mounted
    fstype: glusterfs
    # Note that this just needs to be any path *other* than the actual
    # fstab. This is done just to prevent the devices from being
    # automatically mounted at boot
    fstab: "{{ skylab_state_dir }}/mounts"
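The peer-count line the assert above parses can be spot-checked by hand on any datastore host (standard glusterfs CLI, not part of this repo):

```bash
# Prints "Number of Peers: N"; the assert fails when N ends in 0
gluster peer status | head -n 1
```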
9 skylab/core/roles/datastore/tasks/main.yaml Normal file
@@ -0,0 +1,9 @@
---
- name: Install datastore packages
  ansible.builtin.import_tasks: packages.yaml

- name: Configure mounting
  ansible.builtin.import_tasks: mounts.yaml

- name: Configure glusterfs
  ansible.builtin.import_tasks: gluster.yaml
109 skylab/core/roles/datastore/tasks/mounts.yaml Normal file
@@ -0,0 +1,109 @@
---
- name: Create mount points
  become: true
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: 0755
    owner: root
    group: "{{ ansible_user }}"
  loop:
    - /mnt/datastore
    - /mnt/brick

- name: Determine current mounts
  vars:
    _current_mounts: []
  ansible.builtin.set_fact:
    _current_mounts: "{{ _current_mounts + [item.mount] }}"
  loop: "{{ ansible_mounts }}"
  loop_control:
    label: "{{ item.mount }}"

- name: Ensure mount points are empty when unmounted
  when: item not in _current_mounts
  ansible.builtin.command:
    cmd: "/usr/bin/ls {{ item }}"
  changed_when: false
  failed_when: _mountpoint_ls_raw.stdout
  register: _mountpoint_ls_raw
  loop:
    - /mnt/datastore
    - /mnt/brick

- name: Fetch block device information
  ansible.builtin.command:
    cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
  changed_when: false
  register: _lsblk_info_raw

- name: Process block device information
  ansible.builtin.set_fact:
    _datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"

- name: Check state of the datastore device
  ansible.builtin.assert:
    that: _datastore_device_info.fstype == "crypto_LUKS"
    fail_msg: >-
      ERROR: Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }}
      must be LUKS encrypted
    success_msg: >-
      Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is
      LUKS encrypted

- name: Determine whether datastore block is decrypted
  ansible.builtin.set_fact:
    _datastore_device_is_decrypted: "{{ _datastore_device_info.children is defined }}"

- name: Decrypt datastore block
  when: not _datastore_device_is_decrypted
  block:
    - name: Prompt for decryption key
      no_log: true
      when: skylab_datastore_encryption_password is not defined
      ansible.builtin.pause:
        prompt: >-
          Datastore device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is not
          decrypted. Enter decryption passphrase to continue GlusterFS brick configuration
        echo: false
      register: _luks_decryption_key

    - name: Open LUKS device
      become: true
      community.crypto.luks_device:
        device: /dev/{{ skylab_datastore_device }}
        state: opened
        name: brick
        passphrase: "{{ _luks_decryption_key.user_input | default(skylab_datastore_encryption_password) }}"

    - name: Fetch updated block device information
      ansible.builtin.command:
        cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
      changed_when: false
      register: _lsblk_info_raw

    - name: Process updated block device information
      ansible.builtin.set_fact:
        _datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"

- name: Create dummy fstab
  ansible.builtin.file:
    state: touch
    path: "{{ skylab_state_dir }}/mounts"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0644
    access_time: preserve
    modification_time: preserve

- name: Mount datastore block
  become: true
  ansible.posix.mount:
    path: /mnt/brick
    src: UUID={{ _datastore_device_info.children[0].uuid }}
    state: mounted
    fstype: "{{ _datastore_device_info.children[0].fstype }}"
    # Note that this just needs to be any path *other* than the actual
    # fstab. This is done just to prevent the devices from being
    # automatically mounted at boot
    fstab: "{{ skylab_state_dir }}/mounts"
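The same lsblk probe these tasks parse can be run manually to see what the role will decide; `jq` and the device name `sdb` are assumptions for illustration:

```bash
# Filesystem type of the raw device; the role expects "crypto_LUKS"
lsblk /dev/sdb --fs --json | jq -r '.blockdevices[0].fstype'

# After `cryptsetup luksOpen`, the decrypted mapper appears as a child entry,
# which is what the role uses to detect that the device is already unlocked
lsblk /dev/sdb --fs --json | jq '.blockdevices[0].children'
```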
31 skylab/core/roles/datastore/tasks/packages.yaml Normal file
@@ -0,0 +1,31 @@
---
- name: Install gluster repository
  become: true
  ansible.builtin.dnf:
    name: centos-release-gluster9
    state: present
  register: _datastore_repo_gluster

- name: Enable required repositories
  become: true
  ansible.builtin.lineinfile:
    path: /etc/yum.repos.d/{{ item }}.repo
    line: enabled=1
    state: present
    regexp: "#?enabled=(0|1)"
  loop:
    - Rocky-AppStream
    - Rocky-PowerTools
  register: _datastore_repo_powertools

- name: Install datastore packages
  become: true
  when: ansible_distribution == "Rocky"
  ansible.builtin.dnf:
    state: present
    update_cache: "{{ _datastore_repo_powertools.changed or _datastore_repo_gluster.changed }}"
    name:
      - cryptsetup-luks
      - glusterfs
      - glusterfs-fuse
      - glusterfs-server
6 skylab/core/roles/server/handlers/main.yaml Normal file
@@ -0,0 +1,6 @@
---
- name: restart-sshd
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
20 skylab/core/roles/server/tasks/firewalld.yaml Normal file
@@ -0,0 +1,20 @@
---
- name: Start and enable firewalld
  become: true
  ansible.builtin.systemd:
    name: firewalld
    state: started
    enabled: true

- name: Configure firewall interface zones
  become: true
  when: item.value.firewall is defined
  ansible.posix.firewalld:
    interface: "{{ item.key }}"
    zone: "{{ item.value.firewall }}"
    state: enabled
    permanent: true
    immediate: true
  loop: "{{ skylab_networking | dict2items }}"
  loop_control:
    label: "{{ item.key }}"
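The resulting zone assignments are easy to confirm after a run with the standard firewalld CLI (not part of this repo):

```bash
# List zones that currently have interfaces or sources bound to them
firewall-cmd --get-active-zones

# Show everything attached to one zone, e.g. the trusted zone used for gluster
firewall-cmd --zone=trusted --list-all
```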
32 skylab/core/roles/server/tasks/hosts.yaml Normal file
@@ -0,0 +1,32 @@
---
- name: Retrieve current hostsfile contents
  ansible.builtin.command:
    cmd: cat /etc/hosts
  changed_when: false
  register: _existing_hostsfile_raw

- name: Assemble hostsfile lines
  vars:
    _hostsfile_lines: []
  ansible.builtin.set_fact:
    _hostsfile_lines: "{{ _hostsfile_lines + [hostvars[item].skylab_cluster.address.internal | ansible.netcommon.ipaddr('address') + ' ' + item + '.local ' + hostvars[item].skylab_legacy_names | default([]) | join(' ')] }}"
  loop: "{{ groups.cluster }}"

- name: Configure local hostsfile
  become: true
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item }}"
    state: present
  loop: "{{ _hostsfile_lines }}"
  loop_control:
    label: "{{ item.partition(' ')[0] }}"

- name: Remove unmanaged hostsfile entries
  become: true
  when: "'localhost' not in item and item not in _hostsfile_lines"
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item }}"
    state: absent
  loop: "{{ _existing_hostsfile_raw.stdout_lines }}"
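Each generated line has the shape `ADDRESS HOST.local [legacy names...]`, so resolution can be verified with standard tooling (the hostname below is a placeholder):

```bash
# Confirm the managed entry resolves through /etc/hosts
getent hosts node1.local
```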
24 skylab/core/roles/server/tasks/main.yaml Normal file
@@ -0,0 +1,24 @@
---
- name: Configure sudoers file
  ansible.builtin.import_tasks: sudoers.yaml

- name: Configure SSH server
  ansible.builtin.import_tasks: sshd.yaml

- name: Configure network settings
  when: skylab_networking is defined
  ansible.builtin.include_tasks: networkd.yaml

- name: Configure firewall settings
  when: skylab_networking is defined
  ansible.builtin.include_tasks: firewalld.yaml

- name: Configure hostsfile
  when: "inventory_hostname in groups.cluster"
  ansible.builtin.import_tasks: hosts.yaml

- name: Enable tmpfs mount
  become: true
  ansible.builtin.systemd:
    name: tmp.mount
    enabled: true
97 skylab/core/roles/server/tasks/networkd.yaml Normal file
@@ -0,0 +1,97 @@
---
- name: Configure network settings
  become: true
  block:
    - name: Install systemd-networkd on Rocky
      ansible.builtin.dnf:
        name: systemd-networkd
        state: present

    - name: Ensure network config directory exists
      ansible.builtin.file:
        path: /etc/systemd/network
        state: directory
        owner: root
        group: root
        mode: 0755

    - name: Create network files
      ansible.builtin.template:
        src: network.j2
        dest: /etc/systemd/network/{{ item.key }}.network
        mode: 0644
        owner: root
        group: "{{ ansible_user }}"
      loop: "{{ skylab_networking | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Create netdev files
      when: item.value.device is defined
      ansible.builtin.template:
        src: netdev.j2
        dest: /etc/systemd/network/{{ item.key }}.netdev
        mode: 0644
        owner: root
        group: "{{ ansible_user }}"
      loop: "{{ skylab_networking | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Fetch existing network config directory contents
      changed_when: false
      ansible.builtin.command:
        cmd: /usr/bin/ls /etc/systemd/network
      register: _network_config_dir_raw

    - name: Remove legacy network config files
      when: item.strip().replace('.netdev', '').replace('.network', '') not in skylab_networking
      ansible.builtin.file:
        path: /etc/systemd/network/{{ item }}
        state: absent
      loop: "{{ _network_config_dir_raw.stdout_lines }}"
      loop_control:
        label: "{{ item.strip() }}"

    - name: Configure fallback DNS
      ansible.builtin.lineinfile:
        path: /etc/systemd/resolved.conf
        create: false
        line: FallbackDNS=

    - name: Enable systemd-networkd
      ansible.builtin.systemd:
        name: "{{ item }}"
        enabled: true
      loop:
        - systemd-networkd
        - systemd-networkd-wait-online
        - systemd-resolved

    - name: Disable NetworkManager
      ansible.builtin.systemd:
        name: "{{ item }}"
        enabled: false
      loop:
        - NetworkManager
        - NetworkManager-wait-online

    - name: Start systemd-resolved to enable symlink creation
      ansible.builtin.systemd:
        name: systemd-resolved
        state: started

    - name: Link system resolv.conf to systemd-resolved
      ansible.builtin.file:
        dest: /etc/resolv.conf
        src: /run/systemd/resolve/resolv.conf
        state: link
        force: true
        setype: net_conf_t

    - name: Link systemd-resolved to multi-user target
      ansible.builtin.file:
        dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
        src: /usr/lib/systemd/system/systemd-resolved.service
        state: link
        force: true
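After the cutover from NetworkManager to systemd-networkd, these standard commands (not from this repo) confirm link and DNS state:

```bash
# Per-interface state as seen by systemd-networkd
networkctl list

# Confirm DNS is now answered through systemd-resolved
resolvectl status
```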
40 skylab/core/roles/server/tasks/sshd.yaml Normal file
@@ -0,0 +1,40 @@
---
- name: Configure SSH authentication settings
  become: true
  ansible.builtin.replace:
    path: /etc/ssh/sshd_config
    regexp: "{{ item.regex }}"
    replace: "{{ item.value }}"
  notify: [restart-sshd]
  loop:
    - regex: "^.*PermitRootLogin (yes|no).*$"
      value: PermitRootLogin no
    - regex: "^.*PasswordAuthentication (yes|no).*$"
      value: PasswordAuthentication no
    - regex: "^.*ChallengeResponseAuthentication (yes|no).*$"
      value: ChallengeResponseAuthentication no
    - regex: "^.*GSSAPIAuthentication (yes|no).*$"
      value: GSSAPIAuthentication no
  loop_control:
    label: "{{ item.value }}"

- name: Disable dynamic MOTD on Debian systems
  when: ansible_os_family == "Debian"
  become: true
  ansible.builtin.replace:
    path: /etc/pam.d/sshd
    regexp: "^session optional pam_motd.so motd=/run/motd.dynamic"
    replace: "#session optional pam_motd.so motd=/run/motd.dynamic"

- name: Disable Cockpit activation message on Rocky
  when: ansible_distribution == "Rocky"
  become: true
  ansible.builtin.file:
    path: /etc/motd.d/cockpit
    state: absent

- name: Copy MOTD to remote
  become: true
  ansible.builtin.template:
    src: motd.j2
    dest: /etc/motd
    mode: 0644
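Since a bad sshd_config can lock out remote access, a parse check before the handler restarts the daemon is cheap insurance (standard OpenSSH tooling):

```bash
# Parse-check the active configuration; exits non-zero on errors
sudo sshd -t

# Show the effective values for the settings the tasks above manage
sudo sshd -T | grep -Ei 'permitrootlogin|passwordauthentication'
```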
30 skylab/core/roles/server/tasks/sudoers.yaml Normal file
@@ -0,0 +1,30 @@
---
- name: Disable sudo password for wheel group
  when: ansible_distribution == "Rocky" or ansible_distribution == "CentOS"
  become: true
  ansible.builtin.copy:
    content: "%wheel ALL=(ALL) NOPASSWD: ALL"
    dest: /etc/sudoers.d/30-wheel
    owner: root
    group: "{{ ansible_user }}"
    mode: 0644

# Note that the cleanup tasks need to run after the new installation tasks,
# since one of the files being cleaned up may currently be what grants
# ansible access
- name: Fetch content of sudoers config directory
  become: true
  changed_when: false
  ansible.builtin.command:
    cmd: /usr/bin/ls /etc/sudoers.d/
  register: _sudoers_files_raw

- name: Remove legacy sudoers config files
  when: item.strip() not in ["30-wheel"]
  become: true
  ansible.builtin.file:
    path: /etc/sudoers.d/{{ item.strip() }}
    state: absent
  loop: "{{ _sudoers_files_raw.stdout_lines }}"
  loop_control:
    label: "/etc/sudoers.d/{{ item.strip() }}"
11 skylab/core/roles/server/templates/motd.j2 Normal file
@@ -0,0 +1,11 @@

/####### /## /## /## /## /## /####### /#####
/## /##___/## /##___/## /## /##___/## /##__/##
/##____ /####### /######## /## /######## /######
######## /## |## /## /## /## /## /##__/##
/## /## |## /####### /## /## /## /######
____/## /##_____
/###### ******************* /######## ************

✨ {{ skylab_description }} @{{ skylab_location }}
{{ ' ' }}
18 skylab/core/roles/server/templates/netdev.j2 Normal file
@@ -0,0 +1,18 @@
# ANSIBLE MANAGED FILE - DO NOT MANUALLY EDIT
#
[NetDev]
Name={{ item.key }}
Kind={{ item.value.device }}

{% if item.value.device.lower() == 'bond' %}
[Bond]
Mode={{ item.value.bond_mode | default('balance-rr') }}
PrimaryReselectPolicy=always
MIIMonitorSec=1s
{% endif %}
{% if item.value.device.lower() == 'vlan' %}
[VLAN]
Id={{ item.key.partition('.')[2] }}
{% endif %}

# EOF
32 skylab/core/roles/server/templates/network.j2 Normal file
@@ -0,0 +1,32 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
#
[Match]
Name={{ item.key }}

[Network]
DHCP={{ "Yes" if item.value.dhcp | default(false) else "No" }}
IPv6AcceptRA=No
{% if item.value.dns is defined %}
{% for server in item.value.dns %}
DNS={{ server }}
{% endfor %}
{% endif %}
{% if item.value.bond is defined %}
Bond={{ item.value.bond }}
{% endif %}
{% if not item.value.dhcp | default(false) %}

{% if item.value.gateway is defined %}
Gateway={{ item.value.gateway | ansible.netcommon.ipaddr('address') }}
{% endif %}
{% for address in item.value.addresses | default([]) %}
Address={{ address | ansible.netcommon.ipaddr('host/prefix') }}
{% endfor %}
{% endif %}
{% for interface in skylab_networking.keys() %}
{% if interface.startswith(item.key) and interface.partition('.')[2] | regex_search('^[0-9]{1,4}$') and interface != item.key %}
VLAN={{ interface }}
{% endif %}
{% endfor %}

# EOF
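For a static interface with one address and a gateway, the template renders to roughly the following; the interface name and addresses here are invented for illustration, shown as a heredoc for easy comparison:

```bash
# Example rendered unit for a static interface (names/addresses are placeholders)
cat <<'EOF'
# ANSIBLE MANAGED FILE - DO NOT EDIT
#
[Match]
Name=eth0

[Network]
DHCP=No
IPv6AcceptRA=No
DNS=10.0.0.2

Gateway=10.0.0.1
Address=10.0.0.20/24

# EOF
EOF
```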
69 skylab/core/roles/swarm/tasks/check.yaml Normal file
@@ -0,0 +1,69 @@
---
- name: Check cluster swarm status
  run_once: true
  block:
    - name: Fetch cluster server swarm info
      delegate_to: "{{ item }}"
      ansible.builtin.command:
        cmd: !unsafe docker info --format '{{json .Swarm}}'
      changed_when: false
      register: _docker_cluster_swarm_state_raw
      loop: "{{ groups.cluster }}"

    - name: Process cluster server swarm info
      vars:
        _docker_cluster_swarm_state: {}
      ansible.builtin.set_fact:
        _docker_cluster_swarm_state: "{{ _docker_cluster_swarm_state | combine({item.item: (item.stdout | from_json)}) }}"
      loop: "{{ _docker_cluster_swarm_state_raw.results }}"
      loop_control:
        label: "{{ item.item }}"

    - name: Identify swarm managers
      vars:
        _docker_cluster_swarm_managers: []
      when: item.value.LocalNodeState == 'active' and item.value.ControlAvailable
      ansible.builtin.set_fact:
        _docker_cluster_swarm_managers: "{{ _docker_cluster_swarm_managers + [item.key] }}"
      loop: "{{ _docker_cluster_swarm_state | dict2items }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Check that swarm managers were discovered
      ansible.builtin.assert:
        that:
          - _docker_cluster_swarm_managers
        fail_msg: >-
          ERROR: None of the member cluster servers ({{ groups.cluster | join(', ') }}) is joined
          to a docker swarm as a manager. Please join at least one cluster server to a swarm and
          promote it to swarm manager
        success_msg: >-
          Identified {{ _docker_cluster_swarm_managers | count }} swarm managers
          ({{ _docker_cluster_swarm_managers | join(', ') }})

    - name: Determine swarm manager cluster IDs
      vars:
        _docker_cluster_swarm_manager_cluster_ids: []
      ansible.builtin.set_fact:
        _docker_cluster_swarm_manager_cluster_ids: "{{ _docker_cluster_swarm_manager_cluster_ids + [_docker_cluster_swarm_state[item].Cluster.ID] }}"
      loop: "{{ _docker_cluster_swarm_managers }}"

    - name: Check swarm managers are part of the same swarm
      ansible.builtin.assert:
        that:
          - _docker_cluster_swarm_manager_cluster_ids | unique | count == 1
        fail_msg: >-
          ERROR: Swarm managers ({{ _docker_cluster_swarm_managers | join(', ') }}) appear to be
          joined to different swarms
          (IDs {{ _docker_cluster_swarm_manager_cluster_ids | join(', ') }})
        success_msg: >-
          Swarm managers are joined to swarm with ID
          {{ _docker_cluster_swarm_manager_cluster_ids[0] }}

    - name: Determine swarm manager to use for host configuration
      ansible.builtin.set_fact:
        _docker_swarm_manager: "{{ _docker_cluster_swarm_managers[0] }}"

    - name: Determine whether host needs to be added to the swarm
      ansible.builtin.set_fact:
        _docker_swarm_needs_join: "{{ not _docker_cluster_swarm_state[inventory_hostname].Cluster.ID | default('') == _docker_cluster_swarm_manager_cluster_ids[0] }}"
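The per-host facts this check consumes can be inspected directly on any node; `jq` is an assumption here, used only to pull out the three fields the tasks above key on:

```bash
# LocalNodeState, ControlAvailable, and Cluster.ID drive the manager discovery
docker info --format '{{json .Swarm}}' \
  | jq '{LocalNodeState, ControlAvailable, ClusterID: .Cluster.ID}'
```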
53 skylab/core/roles/swarm/tasks/configure.yaml Normal file
@@ -0,0 +1,53 @@
---
- name: Determine docker daemon DNS servers
  vars:
    _docker_daemon_dns: []
  ansible.builtin.set_fact:
    _docker_daemon_dns: "{{ _docker_daemon_dns + (item.value.dns | default([])) }}"
  loop: "{{ skylab_networking | dict2items }}"
  loop_control:
    label: "{{ item.key }}"

- name: Create docker config directory
  become: true
  ansible.builtin.file:
    path: /etc/docker
    state: directory
    owner: "{{ ansible_user }}"
    group: docker
    mode: 0750

- name: Configure docker daemon
  become: true
  ansible.builtin.template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json
    mode: 0640
    owner: "{{ ansible_user }}"
    group: docker

- name: Start and enable docker service
  become: true
  ansible.builtin.systemd:
    name: docker
    state: started
    enabled: true

- name: Include access variables
  ansible.builtin.include_vars:
    file: vars/access.yaml

- name: Add administrators to docker group
  become: true
  when: item.admin | default(false) and 'cluster' in (item.targets | default([]))
  ansible.builtin.user:
    name: "{{ item.name }}"
    group: "{{ item.name }}"
    groups: docker
    append: true
  loop: "{{ skylab_accounts }}"
  loop_control:
    label: "{{ item.name }},{{ item.uid }}"

- name: Reset connection to get new group membership
  ansible.builtin.meta: reset_connection
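The connection reset matters because supplementary group membership is only evaluated at login; once a fresh session is up, a quick manual check with standard coreutils:

```bash
# Should print "docker" once the new session picks up the group change
id -nG "$USER" | tr ' ' '\n' | grep -x docker
```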
61 skylab/core/roles/swarm/tasks/gluster.yaml Normal file
@@ -0,0 +1,61 @@
---
- name: Fetch glusterfs plugin state
  block:
    - name: Fetch glusterfs storage plugin state
      ansible.builtin.command:
        cmd: docker plugin inspect glusterfs
      changed_when: false
      register: _docker_glusterfs_storage_plugin_raw
  rescue:
    - name: Install glusterfs storage plugin
      ansible.builtin.command:
        cmd: docker plugin install --alias glusterfs mochoa/glusterfs-volume-plugin --grant-all-permissions --disable
      changed_when: true

    - name: Fetch glusterfs storage plugin state
      ansible.builtin.command:
        cmd: docker plugin inspect glusterfs
      changed_when: false
      register: _docker_glusterfs_storage_plugin_raw

- name: Process glusterfs storage plugin config
  ansible.builtin.set_fact:
    _docker_glusterfs_storage_plugin: "{{ (_docker_glusterfs_storage_plugin_raw.stdout | from_json)[0] }}"

# Note that this might not end up being defined if the plugin has not been configured
- name: Identify plugin server settings
  when: item.startswith('SERVERS')
  ansible.builtin.set_fact:
    _docker_glusterfs_existing_setting: "{{ item }}"
  loop: "{{ _docker_glusterfs_storage_plugin.Settings.Env }}"

- name: Determine gluster servers
  run_once: true
  vars:
    _docker_glusterfs_hostnames: []
  ansible.builtin.set_fact:
    _docker_glusterfs_hostnames: "{{ _docker_glusterfs_hostnames + [item + '.local'] }}"
  loop: "{{ groups.cluster }}"

- name: Determine gluster plugin setting
  ansible.builtin.set_fact:
    _docker_glusterfs_setting: "SERVERS={{ _docker_glusterfs_hostnames | join(',') }}"

- name: Configure plugin
  when: _docker_glusterfs_setting != _docker_glusterfs_existing_setting
  block:
    - name: Disable plugin
      when: _docker_glusterfs_storage_plugin.Enabled
      ansible.builtin.command:
        cmd: docker plugin disable glusterfs

    - name: Set plugin servers setting
      changed_when: true
      ansible.builtin.command:
        cmd: docker plugin set glusterfs {{ _docker_glusterfs_setting }}
      register: _docker_glusterfs_set_setting

    - name: Enable plugin
      when: not _docker_glusterfs_storage_plugin.Enabled or _docker_glusterfs_set_setting.changed | default(false)
      ansible.builtin.command:
        cmd: docker plugin enable glusterfs
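The SERVERS value these tasks manage can be read back without parsing the full inspect JSON, using the standard docker CLI and the alias set by the install task above:

```bash
# Current env settings for the plugin, including the SERVERS=... entry
docker plugin inspect glusterfs --format '{{.Settings.Env}}'
```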
26 skylab/core/roles/swarm/tasks/install.yaml Normal file
@@ -0,0 +1,26 @@
---
- name: Install Docker repository
  become: true
  ansible.builtin.get_url:
    url: https://download.docker.com/linux/centos/docker-ce.repo
    dest: /etc/yum.repos.d/docker-ce.repo
    owner: root
    group: "{{ ansible_user }}"
    mode: 0644
  register: _docker_repo_status

- name: Install docker repository GPG key
  become: true
  ansible.builtin.rpm_key:
    key: https://download.docker.com/linux/centos/gpg
    state: present

- name: Install Docker
  become: true
  ansible.builtin.dnf:
    state: present
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
    update_cache: "{{ _docker_repo_status.changed }}"
48 skylab/core/roles/swarm/tasks/join.yaml Normal file
@@ -0,0 +1,48 @@
---
- name: Fetch join token from existing manager
  delegate_to: "{{ _docker_swarm_manager }}"
  changed_when: false
  ansible.builtin.command:
    cmd: docker swarm join-token manager --quiet
  register: _docker_swarm_join_token

- name: Fetch manager addresses from existing manager
  delegate_to: "{{ _docker_swarm_manager }}"
  changed_when: false
  ansible.builtin.command:
    cmd: !unsafe docker info --format '{{json .Swarm.RemoteManagers}}'
  register: _docker_swarm_manager_info_raw

- name: Process manager addresses
  vars:
    _docker_swarm_manager_addresses: []
  ansible.builtin.set_fact:
    _docker_swarm_manager_addresses: "{{ _docker_swarm_manager_addresses + [item.Addr] }}"
  loop: "{{ _docker_swarm_manager_info_raw.stdout | from_json }}"

- name: Join node to swarm
  vars:
    ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
  community.docker.docker_swarm:
    state: join
    advertise_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
    listen_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
    remote_addrs: "{{ _docker_swarm_manager_addresses }}"
    join_token: "{{ _docker_swarm_join_token.stdout.strip() }}"
    timeout: 1200

- name: Fetch node swarm ID
  ansible.builtin.command:
    cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
  changed_when: false
  register: _docker_node_id_raw

# For newly added nodes we don't want to have services be automatically scheduled on them
# until the configuration is complete. The node-up playbook will be responsible for updating
# the node to make it available in the cluster again
- name: Update node to drain
  vars:
    ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
  community.docker.docker_node:
    availability: drain
    hostname: "{{ _docker_node_id_raw.stdout.strip() }}"
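Done by hand, the same join-and-drain sequence looks like this with the standard docker CLI (the node ID is a placeholder; run the first command on an existing manager):

```bash
# On an existing manager: print only the manager join token
docker swarm join-token manager --quiet

# On a manager, after the new node joins: keep services off it until
# configuration finishes (mirrors what the docker_node task does)
docker node update --availability drain NEW-NODE-ID
```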
21 skylab/core/roles/swarm/tasks/main.yaml Normal file
@@ -0,0 +1,21 @@
---
- name: Install Docker
  ansible.builtin.import_tasks: install.yaml

- name: Configure Docker
  ansible.builtin.import_tasks: configure.yaml

# This taskfile will set two facts that will be used in subsequent tasks:
# * _docker_swarm_needs_join: a boolean indicating whether the host needs to be joined to the swarm
#   or is already joined
# * _docker_swarm_manager: the inventory hostname of a swarm manager that can be delegated to in
#   order to fetch swarm joining info
- name: Check swarm state ahead of swarm configuration
  ansible.builtin.import_tasks: check.yaml

- name: Join server to swarm
  when: _docker_swarm_needs_join
  ansible.builtin.include_tasks: join.yaml

- name: Configure gluster storage driver
  ansible.builtin.import_tasks: gluster.yaml
7 skylab/core/roles/swarm/templates/daemon.json.j2 Normal file
@@ -0,0 +1,7 @@
{
    "dns": [
{% for dns_server in _docker_daemon_dns %}
        "{{ dns_server }}"{{ ',' if not loop.last else '' }}
{% endfor %}
    ]
}
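With two DNS servers collected from skylab_networking, the template renders to plain JSON like the following; the addresses are invented for illustration:

```bash
# Example rendered /etc/docker/daemon.json (addresses are placeholders)
cat <<'EOF'
{
    "dns": [
        "10.0.0.2",
        "10.0.0.3"
    ]
}
EOF
```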
2 skylab/core/roles/workstation/files/00-disable-user-list Normal file
@@ -0,0 +1,2 @@
[org/gnome/login-screen]
disable-user-list=true
2 skylab/core/roles/workstation/files/00-enable-fractional-scaling Normal file
@@ -0,0 +1,2 @@
[org/gnome/mutter]
experimental-features=['scale-monitor-framebuffer']
47 skylab/core/roles/workstation/files/bashrc.sh Normal file
@@ -0,0 +1,47 @@
if [ -f "$(which powerline-daemon)" ]; then
    powerline-daemon -q
    POWERLINE_BASH_CONTINUATION=1
    POWERLINE_BASH_SELECT=1
    . /usr/share/powerline/bash/powerline.sh
fi

export NVM_DIR="$HOME/.nvm"
export PROJECTS_DIR="$HOME/projects"

function gg() {
    cd "$PROJECTS_DIR/$1";
    if [ -f "$PROJECTS_DIR/$1/ansible.cfg" ]; then
        ANSIBLE_CONFIG="$PROJECTS_DIR/$1/ansible.cfg" ANSIBLE_COLLECTIONS_PATH="$PROJECTS_DIR/$1/.ansible" poetry shell;
    elif [ -f "$PROJECTS_DIR/$1/pyproject.toml" ]; then
        poetry shell;
    fi
}

mpw() {
    _copy() {
        if hash pbcopy 2>/dev/null; then
            pbcopy
        elif hash xclip 2>/dev/null; then
            xclip -selection clip
        else
            cat; echo 2>/dev/null
            return
        fi
        echo >&2 "Copied!"
    }

    # Empty the clipboard
    :| _copy 2>/dev/null

    # Ask for the user's name and password if not yet known.
    MPW_FULLNAME="Ethan Paul"

    # Start Master Password and copy the output.
    printf %s "$(MPW_FULLNAME=$MPW_FULLNAME command mpw "$@")" | _copy
}

alias explorer='nautilus'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias prun="poetry run"
alias psync="poetry install --remove-untracked"
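Typical usage of the `gg` helper, assuming a checkout under ~/projects (the project name is a placeholder):

```bash
# Jump to the project and enter its poetry environment; ansible projects also
# get ANSIBLE_CONFIG and ANSIBLE_COLLECTIONS_PATH pointed at the repo
gg some-project
```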
3 skylab/core/roles/workstation/files/gdm-system Normal file
@@ -0,0 +1,3 @@
user-db:user
system-db:gdm
file-db:/usr/share/gdm/greeter-dconf-defaults
2 skylab/core/roles/workstation/files/gdm-user Normal file
@@ -0,0 +1,2 @@
user-db:user
system-db:local
BIN skylab/core/roles/workstation/files/lightningbug-dark.tar.gz Normal file (binary file not shown)
BIN skylab/core/roles/workstation/files/multimc.png Normal file (binary file not shown; after: 42 KiB)
BIN skylab/core/roles/workstation/files/wallpaper-discovery.jpg Normal file (binary file not shown; after: 664 KiB)
BIN skylab/core/roles/workstation/files/wallpaper-voyager.jpg Normal file (binary file not shown; after: 243 KiB)
6 skylab/core/roles/workstation/handlers/main.yml Normal file
@@ -0,0 +1,6 @@
---
- name: dconf-update
  become: true
  changed_when: true
  ansible.builtin.command:
    cmd: dconf update
144 skylab/core/roles/workstation/tasks/environment.yml Normal file
@@ -0,0 +1,144 @@
---
- name: Install user bashrc
  become: true
  ansible.builtin.copy:
    src: bashrc.sh
    dest: ~{{ item }}/.bashrc_ansible
    owner: "{{ ansible_user }}"
    group: "{{ item }}"
    mode: 0644
  loop: "{{ _local_human_users }}"

- name: Configure user bashrc loading
  become: true
  ansible.builtin.lineinfile:
    path: ~{{ item }}/.bashrc
    line: source ~/.bashrc_ansible
    state: present
  loop: "{{ _local_human_users }}"

- name: Configure local bash completions loading
  become: true
  ansible.builtin.lineinfile:
    path: ~{{ item }}/.bashrc
    line: source ~/.config/bash_completions
    state: present
  loop: "{{ _local_human_users }}"

- name: Configure bash completions
  become: true
  ansible.builtin.blockinfile:
    path: ~{{ item }}/.config/bash_completions
    create: true
    block: >-
      function _gg_completion() {
        local cur=${COMP_WORDS[COMP_CWORD]};
        COMPREPLY=( $(compgen -W "$(command ls $PROJECTS_DIR)" -- $cur) );
      }

      complete -F _gg_completion gg
    owner: "{{ ansible_user }}"
    group: "{{ item }}"
    mode: 0664
  loop: "{{ _local_human_users }}"

- name: Enforce ownership of the SSH keys
  become: true
  ansible.builtin.file:
    path: ~{{ item.0 }}/.ssh/id_ed25519{{ item.1 }}
    state: file
    owner: "{{ item.0 }}"
    group: "{{ item.0 }}"
  loop: "{{ _local_human_users | product(['', '.pub']) }}"

- name: Configure dconf settings
  become: true
  block:
    - name: Create dconf config directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: root
        group: "{{ ansible_user }}"
        mode: 0755
      loop:
        - /etc/dconf/profile
        - /etc/dconf/db/gdm.d

    - name: Create global dconf config
      ansible.builtin.copy:
        src: gdm-system
        dest: /etc/dconf/profile/gdm
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644
      notify:
        - dconf-update

    - name: Create user dconf config
      ansible.builtin.copy:
        src: gdm-user
        dest: /etc/dconf/profile/user
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644
      notify:
        - dconf-update

    - name: Disable user list
      ansible.builtin.copy:
        src: 00-disable-user-list
        dest: /etc/dconf/db/gdm.d/00-disable-user-list
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644
      notify:
        - dconf-update

    - name: Enable fractional scaling
      ansible.builtin.copy:
        src: 00-enable-fractional-scaling
        dest: /etc/dconf/db/local.d/00-enable-fractional-scaling
        owner: root
        group: "{{ ansible_user }}"
        mode: 0644
      notify:
        - dconf-update

- name: Install themes
  become: true
  block:
    - name: Create local themes directory
      ansible.builtin.file:
        path: ~{{ item }}/.themes
        state: directory
        owner: "{{ item }}"
        group: "{{ item }}"
        mode: 0750
      loop: "{{ _local_human_users }}"

    - name: Unarchive LightningBug into local directory
      ansible.builtin.unarchive:
        src: lightningbug-dark.tar.gz
        dest: ~{{ item }}/.themes
        owner: "{{ item }}"
        group: "{{ item }}"
      loop: "{{ _local_human_users }}"

- name: Install wallpaper
  become: true
  ansible.builtin.copy:
    src: wallpaper-{{ inventory_hostname }}.jpg
    dest: ~{{ item }}/Pictures/wallpaper.jpg
    owner: "{{ item }}"
    group: "{{ item }}"
  loop: "{{ _local_human_users }}"

- name: Link external media directory
  become: true
  ansible.builtin.file:
    path: ~{{ item }}/Drives
    src: /run/media/{{ item }}
    state: link
    force: true
  loop: "{{ _local_human_users }}"
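After the handler recompiles the databases, the installed keys can be sanity-checked with standard dconf/gsettings tooling; the key names match the files installed above:

```bash
# Recompile system dconf databases (same as the dconf-update handler)
sudo dconf update

# Confirm the mutter key from 00-enable-fractional-scaling took effect
gsettings get org.gnome.mutter experimental-features
```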
59 skylab/core/roles/workstation/tasks/install_mpw.yml Normal file
@@ -0,0 +1,59 @@
---
- name: Check for MPW binary
  ansible.builtin.stat:
    path: /usr/local/bin/mpw
  register: _mpw_binary_stat

- name: Install MPW
  when: (not _mpw_binary_stat.stat.exists) or (force_reinstall | default(false))
  block:
    - name: Install build dependencies on Fedora
      when: ansible_distribution == "Fedora"
      become: true
      ansible.builtin.dnf:
        name:
          - libsodium-devel
        state: present

    - name: Create temporary build directory
      ansible.builtin.tempfile:
        prefix: ansible.build.mpw
        state: directory
      register: _mpw_build_dir

    - name: Download MPW source
      ansible.builtin.git:
        repo: https://gitlab.com/MasterPassword/MasterPassword.git
        version: 344771db
        recursive: false  # does *not* clone submodules
        dest: "{{ _mpw_build_dir.path }}"

    # God I hate this
    - name: Patch .gitmodules to use HTTPS
      ansible.builtin.replace:
        path: "{{ _mpw_build_dir.path }}/.gitmodules"
        regexp: "url = git://"
        replace: "url = https://"

    - name: Initialize submodules
      ansible.builtin.command:
        cmd: git submodule update --init
        chdir: "{{ _mpw_build_dir.path }}"

    - name: Build MasterPassword binary
      ansible.builtin.command:
        cmd: bash build
        chdir: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/"

    - name: Copy binary to system path
      become: true
      ansible.builtin.copy:
        remote_src: true
        src: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/mpw"
        dest: "/usr/local/bin"
        mode: 0755
  always:
    - name: Remove temporary directory
      ansible.builtin.file:
        path: "{{ _mpw_build_dir.path }}"
        state: absent
79 skylab/core/roles/workstation/tasks/install_multimc.yml Normal file
@@ -0,0 +1,79 @@
---
- name: Check whether binary exists
  become: true
  ansible.builtin.stat:
    path: "~{{ local_username }}/.local/bin/MultiMC"
  register: _multimc_stat

- name: Install MultiMC
  when: (not _multimc_stat.stat.exists) or (force_reinstall | default(false))
  block:
    - name: Create temp dir
      ansible.builtin.tempfile:
        state: directory
      register: _multimc_tempdir

    - name: Download and unpack distribution archive
      ansible.builtin.unarchive:
        src: https://files.multimc.org/downloads/mmc-stable-lin64.tar.gz
        remote_src: true
        dest: "{{ _multimc_tempdir.path }}"

    - name: Ensure ~/.local/share/ exists
      become: true
      ansible.builtin.file:
        path: ~{{ local_username }}/.local/share
        state: directory
        owner: "{{ local_username }}"
        group: "{{ local_username }}"
        mode: 0700

    - name: Ensure ~/.local/bin/ exists
      become: true
      ansible.builtin.file:
        path: ~{{ local_username }}/.local/bin
        state: directory
        owner: "{{ local_username }}"
        group: "{{ local_username }}"
        mode: 0700

    - name: Copy MMC distribution to ~/.local/share/
      become: true
      ansible.builtin.copy:
        remote_src: true
        src: "{{ _multimc_tempdir.path }}/MultiMC/"
        dest: "~{{ local_username }}/.local/share/multimc"
        owner: "{{ local_username }}"
        group: "{{ local_username }}"
        mode: 0700

    - name: Link MMC binary into ~/.local/bin/
      become: true
      ansible.builtin.file:
        state: link
        src: ~{{ local_username }}/.local/share/multimc/MultiMC
        path: ~{{ local_username }}/.local/bin/MultiMC

    - name: Copy application icon
      become: true
      ansible.builtin.copy:
        src: multimc.png
        dest: ~{{ local_username }}/.local/share/icons/multimc.png
        owner: "{{ local_username }}"
        group: "{{ local_username }}"
        mode: 0755

    - name: Template application desktop entry
      become: true
      ansible.builtin.template:
        src: multimc.desktop.j2
        dest: ~{{ local_username }}/.local/share/applications/multimc.desktop
        owner: "{{ local_username }}"
        group: "{{ local_username }}"
        mode: 0755

  always:
    - name: Delete temp dir
      ansible.builtin.file:
        path: "{{ _multimc_tempdir.path }}"
        state: absent
0 skylab/core/roles/workstation/tasks/install_nvm.yml Normal file
27 skylab/core/roles/workstation/tasks/install_pipx.yml Normal file
@@ -0,0 +1,27 @@
---
- name: Create install directory
  become: true
  ansible.builtin.file:
    path: /opt/pipx
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ skylab_group_admin.name }}"
    mode: 0755

- name: Create install venv
  ansible.builtin.command:
    cmd: python3 -m venv /opt/pipx
    creates: /opt/pipx/bin/python

- name: Install pipx
  ansible.builtin.pip:
    name:
      - pipx
    executable: /opt/pipx/bin/pip

- name: Link pipx binary into system path
  become: true
  ansible.builtin.file:
    state: link
    src: /opt/pipx/bin/pipx
    path: /usr/local/bin/pipx
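Once linked onto the PATH, pipx installs each Python CLI into its own isolated venv; a typical invocation (the tool name is just an example):

```bash
# Install a Python CLI into its own venv and expose it on PATH
pipx install poetry

# List everything pipx currently manages
pipx list
```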
1 skylab/core/roles/workstation/tasks/install_poetry.yml Normal file
@@ -0,0 +1 @@
---
Some files were not shown because too many files have changed in this diff