Compare commits

...

148 Commits

Author SHA1 Message Date
4a516eee15 Stop assuming rockylinux has firewalld installed by default 2023-05-19 16:39:52 -04:00
15a1411f1a Add project resource assignments 2023-05-07 16:16:03 -04:00
868ab721dd Add scipio project 2023-05-07 16:06:53 -04:00
9776e9a316 Add skylab project definition 2023-05-07 16:04:25 -04:00
28f1f80d6f Remove pdb.enp.one 2023-05-07 15:49:14 -04:00
0f9479731a Update domains to use pointer vars instead of repeat values 2023-05-07 15:48:29 -04:00
3df0115191 Add CDN config for space 2023-05-07 15:43:39 -04:00
fcb25b79ce Add CDN space 2023-05-07 14:59:17 -04:00
e591db8581 Add auth subdomain 2023-05-04 16:23:59 -04:00
e4fd90c013 Restructure en1 main inventory group 2023-05-02 22:44:27 -04:00
219b03b4ee Add notify subdomain for scipio app 2023-05-02 22:44:26 -04:00
1b941a11a2 Add docs and notify subdomains to scipio domain 2023-05-02 18:02:20 -04:00
12991700b9 Disable hostkey checking on connection test task 2023-04-21 20:59:57 -04:00
02b6460cff Add ssh port update to bootstrap playbook
Update to use dynamic management settings
Remove check for existing bootstrap directory
Fix re-using ansible password for root user
2023-04-19 18:42:04 -04:00
5f602c797f Add sanity connection check for bootstrap hosts 2023-04-18 21:56:02 -04:00
538bb26f83 Fix updating ssh keys before user exists 2023-04-18 21:39:32 -04:00
fa0df823ee Add main hosts for scipio domain 2023-04-14 15:11:45 -04:00
49eacf103c Add scipiocapital domain 2023-04-13 23:35:02 -04:00
4d1d28c64b Add pdb subdomain for grist 2023-04-11 17:49:33 -04:00
5803ea337e Add www subdomain for enp 2023-04-05 23:04:34 -04:00
20e9ec68d2 Improve docs on cloud playbook 2023-04-05 01:52:45 -04:00
d901c1d940 Add cloud deployment playbook for terraform operations 2023-04-05 01:39:25 -04:00
875d8f1538 Move terraform files to infra collection 2023-04-05 01:38:55 -04:00
1e1677cb4d Add terraform format precommit
Format terraform files
2023-04-04 16:54:01 -04:00
29bccbac02 Add img service cname 2023-04-04 16:47:22 -04:00
80015c6535 Add terraform proof of concept for DO domain management 2023-04-04 13:17:02 -04:00
3bcbee1b85 Update infra role meta to remove boilerplate 2023-04-04 12:20:37 -04:00
8f965c3e2b Restructure inventory for updated deployment schemas 2023-04-01 22:01:55 -04:00
88247b4011 Finalize and debug bootstrap playbook 2023-04-01 22:01:29 -04:00
740b73cb7d Update default to always prompt for vault password 2023-03-31 12:19:50 -04:00
857e83a6fe Add wip bootstrap playbook 2023-03-19 03:12:54 -04:00
745f6acc04 Add new infra collection 2023-03-19 01:45:43 -04:00
43fbb3993b Update makefile to use latest poetry command syntax 2023-03-19 01:45:23 -04:00
955d7e8a64 Enable hostkey checking and disable default forwarding 2023-03-19 01:35:47 -04:00
614fca41c0 Update with latest practices 2023-03-19 01:35:30 -04:00
0163d5ab18 Update transient deps 2023-03-19 01:15:18 -04:00
eb2ad9e60a Add task to link external media mount dir into user home directory 2022-07-11 01:12:31 -04:00
11235ab859 Fix typo in bashrc for setting ansible path 2022-05-24 23:22:19 -04:00
ce72850721 Change projects dir to home dir 2022-05-24 23:11:37 -04:00
d5f92811bd Add loading of local profile bash completions 2022-05-24 22:33:59 -04:00
2d26caba54 Fix typo in gg completions 2022-05-24 22:29:58 -04:00
36ce40d718 Fix wallpaper install task 2022-05-20 17:09:57 -04:00
a7d9e1b270 Add wallpaper for voyager workstation 2022-05-20 17:07:03 -04:00
a6d1d46236 Add fedora to list of supported OSes 2022-05-20 17:06:21 -04:00
d9c00a0d9e Fix user variable usage in completions task 2022-05-20 17:05:12 -04:00
f3008294e4 Add ansible project support to gg function 2022-05-20 17:04:50 -04:00
58dcf4694f Add voyager workstation to inventory 2022-05-20 16:57:50 -04:00
a822fe0915 Add completions integration to workstation env 2022-05-20 16:55:44 -04:00
cd1910c2bd Update SSH keys 2022-05-20 16:41:26 -04:00
421ceabd9e Add arc-theme to list of workstation packages
Whooo first commit from voyager!
2022-05-20 16:36:57 -04:00
068a33626d Add initial structure for workstation role 2022-05-16 21:57:31 -04:00
8b4fb71160 Add support for fedora workstations in core tooling 2022-05-16 21:57:12 -04:00
72d8e7cdde Add meta support for fedora 2022-04-25 21:10:07 -04:00
58128eec46 Add workstation target to ansible and enpaul users 2022-04-25 20:26:20 -04:00
48e7b8208e Add pingtest script for corona network monitor 2022-04-09 01:55:46 -04:00
1c417eda10 Add tox for static checks
Add toxdir to gitignore
Add ansible and yamllint
2022-01-08 22:16:40 -05:00
14ce2dfea6 Replace ansible dep with ansible-core
Add collection dependencies
2022-01-08 21:56:11 -05:00
e9974a054e Update swarm join timeout from 5min to 20min 2022-01-08 21:34:41 -05:00
f61baa3f04 Consolidate ansible settings into config file
Update docs
2021-12-28 00:27:46 -05:00
46e1366c4f Update service config spec to include published resources 2021-12-23 23:36:13 -05:00
924341a276 Update documentation 2021-12-23 23:27:00 -05:00
b36bbec72a Replace hacky ansible entrypoint script with dotenv file 2021-12-23 23:25:40 -05:00
7bb00a3586 Add environment check to collection linking script 2021-12-23 23:25:40 -05:00
9cd0cfcb4f Fix playbook import syntax 2021-12-21 17:45:01 -05:00
80c3565fa1 Update ansible script to use local collections path 2021-12-20 18:44:48 -05:00
fe0fc835cd Add logic for building local ansible 'virtualenv'
Add makefile and targets for building local dev environment
Add script for linking dev collections into local collection dir
Add local collection dir to gitignore
2021-12-20 18:44:48 -05:00
ed2fd510a5 Rename requirements file to keep consistent file ext 2021-12-20 18:34:45 -05:00
b3e2d1c887 Add community.docker collection dependency 2021-12-20 18:12:51 -05:00
6afb84b443 Add collection namespacing to playbook roles 2021-12-11 22:41:11 -05:00
5ead10afb9 Fix node ip presence check defaulting to /32 addresses
Fix typo in task name
2021-12-11 17:35:59 -05:00
4e1d50504d Rename playbooks to use _ instead of - for new collection structure 2021-12-11 16:58:36 -05:00
50e161f8dc Add 5min timeout to swarm join operation 2021-12-11 16:44:27 -05:00
3001e19c7e Restructure project into collection 2021-12-11 15:59:09 -05:00
85877f8431 Bump ansible to ^4.9 2021-12-11 15:59:09 -05:00
425761f0f5 Fix incorrect netdev type key name 2021-12-11 15:59:09 -05:00
1e0eb9b426 Add missing datastore config for pegasus 2021-12-11 15:59:08 -05:00
f791b43c86 Update nat rule number for adguard server 2021-12-11 15:59:08 -05:00
12ceb3558b Add bootstrap tasks to node up/down playbooks 2021-12-11 15:59:08 -05:00
eb1ff31e30 Add initial nginx config to deployment playbook 2021-12-11 15:59:08 -05:00
d611301f8a Add initial (incomplete) stack deployment playbook 2021-12-11 15:59:08 -05:00
03574c1560 Fix misnamed automation account for minecraft user 2021-12-11 15:59:08 -05:00
ea2f797b30 Fix invalid service name for _meta 2021-12-11 15:59:08 -05:00
687e189b18 Add initial config for dedicated monitoring server 2021-11-16 23:21:23 -05:00
37b22c7ef5 Add full FQDN to iridium 2021-11-16 23:21:23 -05:00
cf22d27c57 Remove hacky and frankly embarrassing role usage pattern 2021-11-15 20:56:02 -05:00
b4feffc118 Fix backwards y in server motd 2021-11-15 19:59:34 -05:00
0c95df3066 Update access targets to be optional parameter
Add server role to iridium
2021-11-15 19:53:19 -05:00
be9c658589 Port docker-compose resources and service details 2021-11-15 01:31:39 -05:00
eb569c05c7 Add install of gluster storage plugin to swarm role 2021-11-13 21:09:54 -05:00
f178a7bf78 Update cluster config var to include public+private addresses 2021-11-13 19:34:59 -05:00
20450332d4 Add playbook for restoring offline'd node 2021-11-11 00:34:05 -05:00
776e35f1a3 Update node update config to use node id rather than magic name 2021-11-11 00:07:58 -05:00
4275a0bc23 Add optional support for passing datastore encryption key from caller 2021-11-10 23:54:25 -05:00
674d432773 Add service management for glusterd 2021-11-10 23:49:25 -05:00
05b475c464 Add tmpfs mount enable to server role 2021-11-10 23:31:31 -05:00
a0be654b92 Add role for adding docker nodes to swarm 2021-11-10 21:59:04 -05:00
8c69b7af95 Update cluster interface config to specify internal and external 2021-11-10 21:03:48 -05:00
28af9314ef Add jq to global packages 2021-11-10 01:30:34 -05:00
450d8fcb7a Add documentation for datastore role 2021-11-10 00:41:55 -05:00
01c0e21f94 Add datastore role for validating config of datastore setup 2021-11-10 00:22:43 -05:00
c11e492f8f Add legacy local aliases to hosts config 2021-11-10 00:17:40 -05:00
e298d5afa2 Add firewall configuration to server role 2021-11-09 20:59:45 -05:00
bcbdd75185 Update motd again to fix that wonky y 2021-11-09 00:35:30 -05:00
8ac7e0f5a3 Add hostfile management to server role
Add aliases for direct connections of the cluster servers
2021-11-09 00:26:28 -05:00
197157b830 Update state directory to maybe not break itself on every reboot 2021-11-09 00:03:05 -05:00
4069d8a77a Fix ssh auth config settings labels 2021-11-06 22:14:20 -04:00
fe0cd3ab67 Reorganize network spec to more accurately reflect settings 2021-11-06 22:06:12 -04:00
2cff4e4354 Fix bootstrap instructions to include group id 2021-11-06 20:46:32 -04:00
b4f9fba952 Update the motd to improve readability 2021-11-06 20:31:12 -04:00
8f805c3b15 Move hostname parameter to separate variable
Update inventory to support hubble
2021-11-06 20:31:12 -04:00
742ef24a77 Roll passwords 2021-11-06 20:31:11 -04:00
f66a1fb8cc Fix networkd templating integration and access permissions 2021-11-06 01:19:37 -04:00
d24a9b2713 Fix deletion of old sudoers files 2021-11-06 01:16:05 -04:00
487e41c058 Add networkd configuration to server role
Update inventory with necessary networking settings
2021-11-06 00:37:32 -04:00
ce799cceaa Simplify sudoers config by using copy content parameter
Delete unneeded file
2021-11-06 00:37:31 -04:00
12eabe1351 Move tasks from update to provision
Move global bashrc and universal package install from update to provision playbooks
2021-11-06 00:37:31 -04:00
4a21c792e1 Add task to disable case sensitive tab completion 2021-11-06 00:37:31 -04:00
9dd76a9161 Update motd to fix malformed y 2021-11-06 00:37:31 -04:00
ec6106c73e Update ps1 to fix path formatting 2021-11-05 23:46:46 -04:00
f39804e621 Update motd with 2021-11-05 23:25:16 -04:00
ea6ae01f76 Update to allow override of hostname with FQDN 2021-11-05 23:11:45 -04:00
202de6d2b4 Fix mismatched access targets for cluster operations 2021-11-05 23:11:30 -04:00
cf0380aee4 Add pre-provisioning bootstrap checklist 2021-11-05 23:09:19 -04:00
4563957e80 Fix cache failure with epel on first install 2021-11-05 22:40:57 -04:00
7546c88ee4 Add cluster roles to inventory 2021-11-05 22:31:11 -04:00
96ea66b77a Reorganize provision playbook
Split server-specific configs out into server role

Add symlink to roles for playbook directory
2021-11-05 22:31:09 -04:00
732cf53192 Add general configuration playbook
Add meta taskfile for bootstrapping remote venv
2021-11-05 21:53:03 -04:00
6819e6b4cb Consolidate ssh config tasks 2021-11-05 21:40:20 -04:00
9e0c0505b3 Add playbook for fully shutting down a host 2021-10-28 00:35:31 -04:00
7ea4d070ee Fix variable scoping in node-down playbook 2021-10-28 00:35:19 -04:00
8f2ffd6619 Add playbook for offline'ing a cluster host safely 2021-10-28 00:27:15 -04:00
622481e231 Update inventory with initial content
Add cluster hosts and initial networking spec
Add core host for core router
2021-10-28 00:27:15 -04:00
0bbc4ec1cd Add ipython as dev dependency 2021-10-27 22:55:46 -04:00
ec4fba16d6 Add netaddr to support ipaddress filters 2021-10-27 22:00:52 -04:00
8fd063d21d Add runtime group assignment based on OS platform 2021-09-08 21:07:14 -04:00
2814d42148 Add network infrastructure
Add network group for filtering network hosts
Add network target for auth'ing to network hosts
Update playbooks to filter out network targets
2021-09-07 20:31:18 -04:00
d05c0cffaa Fix env var issues with hacky ansible script 2021-09-07 20:17:05 -04:00
3f4c54f62b Update motd 2021-09-06 01:05:33 -04:00
091c5a78a6 Add playbook for updating existing settings 2021-09-06 00:45:55 -04:00
d45facb76d Add universal package installation 2021-09-05 23:19:41 -04:00
b1a247904c Update motd banner 2021-09-05 23:03:11 -04:00
3e0652bd5f Fix file extension on provision playbook
This is gonna be a bad habit
2021-09-05 22:57:47 -04:00
85d81ec769 Add initial provisioning playbook 2021-09-05 22:54:48 -04:00
e77db3d473 Add access vars and target to hubble 2021-09-05 00:52:12 -04:00
cf187d2217 Update MD files to use mdformat 2021-09-05 00:01:50 -04:00
1aebb0b339 Add hacky ansible script to set environment parameters 2021-09-05 00:01:03 -04:00
f6b43cfc98 Add initial inventory with hubble server 2021-09-04 23:57:34 -04:00
84053b3ce6 Remove SSH compression and INI inventory plugin 2021-09-04 23:57:03 -04:00
112 changed files with 8464 additions and 770 deletions

9
.ansible-lint.yaml Normal file
View File

@@ -0,0 +1,9 @@
---
skip_list:
- line-length # don't yell about line length
- meta-no-info # we don't publish to galaxy so stop yelling about it
- package-latest # we install lots of latest stuff still 😢
- experimental # no instability plz, give us a call when ur stable
warn_list:
- no-handler # good to keep, but shouldn't be fatal

4
.gitignore vendored
View File

@@ -5,3 +5,7 @@ playbooks/testing.yml
*.idea
**/__pycache__/
.venv/
.ansible/
.tox/
.terraform/
.terraform.lock.*

View File

@@ -32,3 +32,11 @@ repos:
- "--wrap=90"
types:
- markdown
- id: terraform
name: terraform format
entry: terraform
language: system
args:
- fmt
files: ".*\\.tf$"

7
.yamllintrc.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
yaml-files:
- "*.yml"
- "*.yaml"
rules:
line-length: disable

View File

@@ -9,4 +9,9 @@ to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.**
**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.**

9
Makefile Normal file
View File

@@ -0,0 +1,9 @@
clean:
rm --recursive --force .ansible/
rm --recursive --force .tox/
dev:
@poetry install --sync
@poetry run pre-commit install
@poetry run ansible-galaxy collection install --requirements-file ./requirements.yaml --collections-path ./.ansible
@bash ./link-local-collections.sh

View File

@@ -2,3 +2,28 @@
Ansible configs for the Skylab Homelab
## Local workstation setup:
```bash
make dev
poetry run ansible-playbook ...
```
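For example, to run one of the collection playbooks against a single inventory group (an illustrative invocation; adjust the playbook name and limit pattern as needed):
```bash
# The skylab.core collection and the inventory/ directory are resolved via ansible.cfg
poetry run ansible-playbook skylab.core.configure --limit cluster
```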
## Bootstrapping a remote system for management:
1. Install a supported operating system: [Rocky Linux](https://rockylinux.org),
[Fedora](https://getfedora.org)
2. During installation create a user named `ansible` with any password
3. After installation copy SSH key to the `ansible` user
4. Enable password-less sudo access for the `ansible` user with this command:
```bash
sudo tee /etc/sudoers.d/30-ansible <<<"ansible ALL=(ALL) NOPASSWD:ALL"
```
5. Change the UID/GID of the `ansible` user/group to `1400` with these commands:
```bash
sudo usermod -u 1400 ansible
sudo groupmod -g 1400 ansible
```
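6. Optionally confirm the prerequisites before running the bootstrap playbook (a minimal sanity check, assuming the host still listens on the default SSH port at this point):
```bash
# Key-based login, password-less sudo, and the 1400 UID/GID should all succeed
ssh ansible@<new-host> 'sudo --non-interactive true && id'
```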

View File

@@ -1,8 +1,10 @@
[defaults]
host_key_checking = false
host_key_checking = true
collections_path = .ansible
inventory = inventory/
[ssh_connection]
ssh_args = "-C -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"
ssh_args = "-o ControlMaster=auto -o ControlPersist=60s"
[inventory]
enable_plugins = yaml
enable_plugins = ansible.builtin.yaml

166
inventory/en1.old.yaml Normal file
View File

@@ -0,0 +1,166 @@
---
workstation:
hosts:
voyager:
skylab_description: Personal Workstation
skylab_hostname: voyager.skylab.enp.one
skylab_targets: [workstation]
en1:
vars:
skylab_location: Newton MA
skylab_dashboard: info.en1.local
# gross hack for now, will be refactored later
_skylab_adguard_nat_rule: 9
hosts:
core:
ansible_host: 10.42.101.1
ansible_port: 4242
ansible_network_os: edgeos
skylab_description: EN1 Core Router
iridium:
ansible_host: 10.42.101.200
skylab_description: Local Monitor Node
skylab_hostname: iridium.skylab.enp.one
skylab_targets: [network]
skylab_networking:
enp4s0:
firewall: internal
dhcp: false
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.200/24
children:
cluster:
vars:
skylab_targets: [cluster, datastore]
skylab_compose_version: 3.8
skylab_compose_dir: "{{ skylab_state_dir }}/compose"
hosts:
pegasus: # jupiter
ansible_host: 10.42.101.100
skylab_hostname: pegasus.skylab.enp.one
skylab_legacy_names:
- jupiter.net.enp.one
- jupiter.svr.local
skylab_description: Arbiter Node
skylab_cluster:
address:
access: 10.42.101.10/24
internal: 192.168.42.10/24
interface:
access: bond0
internal: bond0.99
skylab_datastore_device: sdb
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.100/24
- 192.168.255.255/32
dhcp: false
bond0.99:
device: vlan
firewall: trusted
addresses:
- 192.168.42.10/24
dhcp: false
saturn: # remus
ansible_host: 10.42.101.110
skylab_hostname: saturn.skylab.enp.one
skylab_legacy_names:
- remus.net.enp.one
- remus.svr.local
skylab_description: Operational Node
skylab_cluster:
address:
access: 10.42.101.11/24
internal: 192.168.42.20/24
interface:
access: bond0
internal: bond0.99
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
dhcp: false
gateway: 10.42.101.1/24
addresses:
- 10.42.101.110/24
- 192.168.255.255/32
dns:
- 10.42.101.1
bond0.99:
device: vlan
firewall: trusted
dhcp: false
addresses:
- 192.168.42.20/24
orion: # romulus
ansible_host: 10.42.101.120
skylab_hostname: orion.skylab.enp.one
skylab_legacy_names:
- romulus.net.enp.one
- romulus.svr.local
skylab_description: Operational Node
skylab_cluster:
address:
access: 10.42.101.12/24
internal: 192.168.42.30/24
interface:
access: bond0
internal: bond0.99
skylab_datastore_device: sdb
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.120/24
- 192.168.255.255/32
dhcp: false
bond0.99:
device: vlan
firewall: trusted
addresses:
- 192.168.42.30/24
dhcp: false
en2:
vars:
skylab_location: DigitalOcean TOR1
hosts:
hubble:
ansible_host: en2a.enp.one
skylab_hostname: hubble.en2.enp.one
skylab_description: Cloud Web Server
skylab_targets: [cloud]

51
inventory/en1.yaml Normal file
View File

@@ -0,0 +1,51 @@
---
en1:
vars:
skylab_location: Cambridge
children:
domain:
children:
cluster:
hosts:
canaveral:
ansible_host: 10.42.101.10
skylab_description: Compute and Storage Node
baikonur:
ansible_host: 10.42.101.11
skylab_description: Compute and Storage Node
vandenberg:
ansible_host: 10.42.101.12
skylab_description: Compute and Storage Node
andoya:
ansible_host: 10.42.101.13
skylab_description: Auxiliary Compute Node
jiuquan:
ansible_host: 10.42.101.14
skylab_description: Auxiliary Compute Node
datastore:
hosts:
canaveral:
skylab_datastore_block: /dev/sda
baikonur:
skylab_datastore_block: /dev/sda
vandenberg:
skylab_datastore_block: /dev/sda
hosts:
3d-printer: {}
mediastore: {}
backstore: {}
local:
hosts:
core: {}
switch-1: {}
switch-2: {}
wap-1: {}
wap-2: {}
wap-3: {}
printer: {}

View File

@@ -0,0 +1,39 @@
---
ansible_user: ansible
ansible_port: 4242
skylab_state_dir: /var/lib/skylab
skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"
skylab_ansible_vault_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
61323762623165383963316238343539346336663864366631616339356564346636373561616237
6666363531393234636337656431366365343236346536320a346163353935366636303131313661
32623635363063383039363539303135393838376264356463646465376435616363376163373663
6366633665373939380a373234633365376632376433643034336539346338613566353537663731
34323464633165626133306464363464333539363761343831316565356266373833
skylab_tfstate_backend:
hostname: cluster.lab.enp.one
username: terraform
schema: terraform
port: 32421
password: !vault |
$ANSIBLE_VAULT;1.1;AES256
30313365393065316563323363663135313438616461356439366632303636343735653033363930
6334613931376566363064663539643639326363663933610a306138616362376435386466306538
30626330613932363339363438356430613461313335333536623931343436353330393433373630
3631343463616631380a386661336534663033383637666538316665303962353034376232356235
65323339353563623431666535366465353133343137653232326534326436323661636536373564
3466633762303966366366653531613261336561356531636461
skylab_mgmt:
sshport: 4242
group: skylab
user: ansible
id: 1400
sshkeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP5TGKururOa1Y+cbv8AWXYI5zhfZCDV0fsBG+33IYUc enpaul@ansible.voyager
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBf7i/8hSJDYnoD95noCJJVtSxxCp9N5EmnshALufiwm enpaul@ansible.opportunity

28
link-local-collections.sh Executable file
View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
PWD=$(pwd)
ANSIBLE_NAMESPACE="skylab"
ANSIBLE_COLLECTION_DIR="$PWD/.ansible/ansible_collections"
mkdir --parents "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE"
for collection_path in "$PWD"/"$ANSIBLE_NAMESPACE"/*; do
collection=$(basename "$collection_path")
if [[ ! -L "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection" ]]; then
echo "Linking $ANSIBLE_NAMESPACE.$collection into $ANSIBLE_COLLECTION_DIR"
rm --recursive --force "${ANSIBLE_COLLECTION_DIR:?}/$ANSIBLE_NAMESPACE/$collection"
ln --symbolic "$PWD/$ANSIBLE_NAMESPACE/$collection" "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection"
fi
done
echo "Finished linking local collections"
LOCAL_COLLECTION_PATH=$(dirname "$ANSIBLE_COLLECTION_DIR")
if [ -z ${ANSIBLE_COLLECTIONS_PATH+x} ]; then
echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH is not set, collections will not be callable"
echo " HINT: export ANSIBLE_COLLECTIONS_PATH=$LOCAL_COLLECTION_PATH"
elif [[ ${ANSIBLE_COLLECTIONS_PATH} != *"$LOCAL_COLLECTION_PATH"* ]]; then
echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH does not include local collection directory"
echo " HINT: export ANSIBLE_COLLECTIONS_PATH=\$ANSIBLE_COLLECTIONS_PATH:$LOCAL_COLLECTION_PATH"
fi
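# Example workflow (illustrative; the Makefile `dev` target runs this script after
# installing the galaxy requirements into ./.ansible):
#   ./link-local-collections.sh
#   export ANSIBLE_COLLECTIONS_PATH="$(pwd)/.ansible"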

2701
poetry.lock generated

File diff suppressed because it is too large

View File

@@ -6,20 +6,22 @@ authors = ["Ethan Paul <me@enp.one>"]
license = "MIT"
[tool.poetry.dependencies]
python = "^3.8"
ansible = "^3.4.0"
docker = "^4.2.0"
docker-compose = "^1.25.4"
python = "^3.10"
ansible-core = "^2.14.3"
docker = "^6.0.1"
paramiko = "^2.7.1"
jsondiff = "^1.2.0"
jsondiff = "^2.0.0"
netaddr = "^0.8.0"
[tool.poetry.dev-dependencies]
ansible-lint = "^4.2.0"
pre-commit = "^2.9.2"
pre-commit-hooks = "^3.3.0"
safety = "^1.9.0"
ansible-lint = {version = "^6.14.0", markers = "platform_system != 'Windows'"}
ipython = "^8.11.0"
mdformat = "^0.7.16"
mdformat-gfm = "^0.3.5"
poetry = "^1.3.0"
pre-commit = "^3.2.0"
pre-commit-hooks = "^4.4.0"
safety = "^2.3.5"
tox = "^3.20.1"
tox-poetry-installer = "^0.8.1"
yamllint = "^1.20.0"
mdformat = "^0.7.9"
mdformat-gfm = "^0.3.3"
tox-poetry-installer = {extras = ["poetry"], version = "^0.10.0"}
yamllint = "^1.29.0"

4
requirements.yaml Normal file
View File

@@ -0,0 +1,4 @@
---
collections:
- source: ./skylab/
type: subdirs
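# Consumed by the Makefile `dev` target (see above), roughly:
#   ansible-galaxy collection install --requirements-file ./requirements.yaml --collections-path ./.ansible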

3
skylab/core/README.md Normal file
View File

@@ -0,0 +1,3 @@
# Ansible Collection - skylab.core
Documentation for the collection.

26
skylab/core/galaxy.yml Normal file
View File

@@ -0,0 +1,26 @@
---
namespace: skylab
name: core
version: 0.0.0
description: Network deployment procedures and configuration state management
authors:
- Ethan Paul <me@enp.one>
license:
- MIT
readme: README.md
tags: []
repository: https://vcs.enp.one/skylab/skylab-ansible/
build_ignore: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
ansible.netcommon: ">=2.5.0,<3.0.0"
ansible.posix: ">=1.3.0,<2.0.0"
ansible.utils: ">=2.4.3,<3.0.0"
community.docker: ">=2.0.2,<3.0.0"
community.network: ">=3.0.0,<4.0.0"
community.general: ">=4.1.0,<5.0.0"
community.crypto: ">=1.0.0,<2.0.0"

View File

@@ -0,0 +1,2 @@
---
requires_ansible: ">=2.11,<2.15"

View File

@@ -0,0 +1,47 @@
---
- name: Group hosts by platform
hosts: all
tags:
- always
pre_tasks:
- include_tasks: tasks/meta/runtime-group-determination.yaml
- name: Bootstrap remote ansible environment
hosts: linux
gather_facts: false
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
# [lemony snicket voice] "server" here being a word used to mean "not a workstation"
- name: Configure servers
hosts: linux:!workstation
gather_facts: false
roles:
- role: skylab.core.server
- name: Configure cluster
hosts: linux:&cluster
gather_facts: false
roles:
- role: skylab.core.datastore
- role: skylab.core.swarm
- name: Configure dashboard nodes
hosts: iridium
gather_facts: false
roles:
- role: skylab.core.dashboard
dashboard_hostname: "{{ skylab_dashboard }}"
- name: Configure workstations
hosts: workstation
gather_facts: false
roles:
- role: skylab.core.workstation

View File

@@ -0,0 +1,200 @@
---
- name: Bootstrap remote ansible environment
hosts: linux
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
- name: Clean up old orchestration data
hosts: cluster
gather_facts: false
tags:
- cleanup
vars_files:
- vars/services.yaml
- vars/access.yaml
tasks:
- name: Create compose storage directory
become: true
ansible.builtin.file:
path: "{{ skylab_compose_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
- name: Fetch existing compose files
ansible.builtin.command:
cmd: /usr/bin/ls {{ skylab_compose_dir }}
changed_when: false
register: _compose_contents_raw
- name: Remove legacy compose files
when: item.replace('.yaml', '') not in skylab_services
ansible.builtin.file:
path: "{{ skylab_compose_dir }}/{{ item }}"
state: absent
loop: "{{ _compose_contents_raw.stdout_lines }}"
- name: Fetch existing stacks
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_stack_info: {}
register: _stack_info
- name: Remove legacy stacks
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
when: item.Orchestrator == 'Swarm' and item.Name not in skylab_services
community.docker.docker_stack:
name: "{{ item.Name }}"
state: absent
loop: "{{ _stack_info.results }}"
loop_control:
label: "{{ item.Name }}"
- name: Fetch existing Nginx configs
ansible.builtin.command:
cmd: ls {{ local_datastore_mount }}/appdata/nginx/conf.d/
changed_when: false
register: _nginx_configs
- name: Remove legacy nginx configs
when: item.replace('.conf', '') not in skylab_services
ansible.builtin.file:
path: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item }}.conf"
state: absent
loop: "{{ _nginx_configs.stdout_lines }}"
- name: Deploy stack service{{ (' ' + service) if service is defined else 's' }}
hosts: cluster
gather_facts: false
vars:
local_datastore_mount: /mnt/datastore
vars_files:
- vars/access.yaml
- vars/services.yaml
tasks:
- name: Validate user input
when: service is defined
ansible.builtin.assert:
that:
- service in skylab_services
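# Illustrative invocation: pass `-e service=gitea` to deploy a single stack
# (gitea is one of the templated services in this changeset); omit it to deploy all stacks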
- name: Determine service stacks to deploy
ansible.builtin.set_fact:
_services: "{{ {service: skylab_services[service]} if service is defined else skylab_services }}"
- name: Determine app account mapping
vars:
_service_accounts: {}
when: item.service | default(false)
ansible.builtin.set_fact:
_service_accounts: "{{ _service_accounts | combine({item.name: item}) }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.name }}"
- name: Create compose directory
become: true
ansible.builtin.file:
path: "{{ skylab_compose_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
- name: Install compose file
vars:
app: "{{ item.value }}"
_app_account: "{{ _service_accounts[item.value.user] if item.value.user is defined else false }}"
ansible.builtin.template:
src: docker-compose/{{ item.key }}.yaml.j2
dest: "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0660
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create automation groups
become: true
when: item.value.user is defined
ansible.builtin.group:
name: "{{ item.value.user }}"
gid: "{{ _service_accounts[item.value.user].uid }}"
state: present
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create automation accounts
become: true
when: item.value.user is defined
ansible.builtin.user:
name: "{{ item.value.user }}"
state: present
uid: "{{ _service_accounts[item.value.user].uid }}"
group: "{{ item.value.user }}"
groups: "{{ [skylab_group_automation.name, skylab_group.name] }}"
system: true
generate_ssh_key: false
password: "{{ _service_accounts[item.value.user].password }}"
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Configure datastore directories
run_once: true
block:
- name: Determine volume directories
vars:
_stack_volume_directories: []
when: item.value.volumes is defined
ansible.builtin.set_fact:
_stack_volume_directories: "{{ _stack_volume_directories + [{'user': (item.value.user | default(ansible_user)), 'volumes': (item.value.volumes.values() | list)}] }}"
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create service directories
become: true
ansible.builtin.file:
path: "{{ local_datastore_mount }}{{ item.1 }}"
state: directory
owner: "{{ item.0.user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
loop: "{{ _stack_volume_directories | subelements('volumes') }}"
- name: Deploy stack
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_stack:
name: "{{ item.key }}"
compose:
- "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
prune: false
state: present
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Configure reverse proxy
run_once: true
block:
- name: Create nginx config
when: item.value.domain is defined
ansible.builtin.template:
src: stack-nginx.conf.j2
dest: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item.key }}.conf"
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0464
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.value.domain | default(item.key) }}"

View File

@@ -0,0 +1,37 @@
function _parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[37m\]:\w\[\e[33m\]\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\[\e[0;97m\]$\[\e[0m\] "
export rc=/home/$USERNAME/.bashrc
export VIRTUALENV_DIR=/home/$USERNAME/.venvs
export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt
function random() {
if [[ $# -eq 0 ]]; then
num=32
else
num=$1
fi
cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $num | head -n 1
}
function up() { cd $(eval printf '../'%.0s {1..$1}); }
function pipin() { pip freeze | grep $1; }
function continuous () { while true; do ${@}; sleep 3; done; }
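# Usage examples (illustrative): `random 16` prints a 16-character token, `up 3`
# moves up three directories, `continuous date` re-runs `date` every three seconds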
alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"
alias uptime="command uptime --pretty"
alias unmount="umount"

View File

@@ -0,0 +1,137 @@
#!/usr/bin/env bash
set -o pipefail
declare FMT_RESET
declare FMT_BOLD
declare FMT_GREEN
declare FMT_RED
declare NL
FMT_RESET=$(printf "\\e[0m")
FMT_BOLD=$(printf "\\e[1m")
FMT_GREEN=$(printf "\\e[32m")
FMT_RED=$(printf "\\e[31m")
NL=$'\n'
readonly FMT_RESET
readonly FMT_BOLD
readonly FMT_GREEN
readonly FMT_RED
readonly NL
function usage() {
cat << __EOF__
${FMT_GREEN}$(basename "$0")${FMT_RESET}: \
Ping hosts and print status
${FMT_BOLD}Usage:${FMT_RESET}
$(basename "$0") [-h] [--service|--network]
${FMT_GREEN}-h --help${FMT_RESET}
Print this message and exit.
${FMT_GREEN}--services${FMT_RESET}
Report service status
${FMT_GREEN}--network${FMT_RESET}
Report network status
__EOF__
}
function _fmt_online() { echo "${FMT_BOLD}${FMT_GREEN}ONLINE${FMT_RESET}"; }
function _fmt_offline() { echo "${FMT_BOLD}${FMT_RED}OFFLINE${FMT_RESET}"; }
function _test_cmd() { if eval "$1" &>/dev/null ; then echo "${2}~$(_fmt_online)"; else echo "${2}~$(_fmt_offline)"; fi }
function _test_ping() { _test_cmd "ping -W 2 -c 1 ${1}" "${2}"; }
function _test_curl_head() { _test_cmd "curl --fail --head ${1}" "${2}"; }
function _test_curl_get() { _test_cmd "curl --fail --get ${1}" "${2}"; }
function _test_curl_insecure() { _test_cmd "curl --fail --head --insecure ${1}" "${2}"; }
function _test_netcat() { _test_cmd "nc -z ${1} ${2}" "${3}"; }
function network() {
local uplink_address="1.1.1.1"
declare -a infra=("core.en1.local" "switch.en1.local" "wap-1.en1.local" "wap-2.en1.local" "wap-3.en1.local" "printer.en1.local")
declare -a infra_names=("Core Router" "Core Switch" "Wireless AP 1" "Wireless AP 2" "Wireless AP 3" "Printer")
declare -a lab=("cluster.skylab.enp.one" "pegasus.skylab.enp.one" "saturn.skylab.enp.one" "orion.skylab.enp.one" "iridium.skylab.enp.one" "en2.enp.one")
declare -a lab_names=("Cluster" "Pegasus" "Saturn" "Orion" "Iridium" "Hubble")
local output=$(_test_ping "$uplink_address" "UPLINK")
output+="${NL}";
output+="${NL}INFRASTRUCTURE~STATE${NL}"
for (( index=0; index<"${#infra[@]}"; index++ )); do
output+=$(_test_ping "${infra[$index]}" "${infra_names[$index]}")
output+="${NL}"
done
output+="${NL}HOMELAB~STATE${NL}"
for (( index=0; index<"${#lab[@]}"; index++ )); do
output+=$(_test_ping "${lab[$index]}" "${lab_names[$index]}")
output+="${NL}"
done
column -e -t -s '~' <<<"$output"
}
function services() {
local output="INTERNAL~STATE${NL}"
output+=$(_test_netcat "cluster.skylab.enp.one" "53" "AdGuard DNS")
output+="${NL}"
output+=$(_test_netcat "core.en1.local" "53" "Fallback DNS")
output+="${NL}"
output+=$(_test_curl_insecure "https://cluster.skylab.enp.one:8443/status" "Ubiquiti WLC")
output+="${NL}"
output+="${NL}PUBLIC~STATE${NL}"
output+=$(_test_curl_head "https://pms.enp.one/web/index.html" "Plex Media Server")
output+="${NL}"
output+=$(_test_netcat "cluster.skylab.enp.one" "25565" "Minecraft Server")
output+="${NL}"
output+=$(_test_curl_get "https://vcs.enp.one/api/v1/version" "Version Control")
output+="${NL}"
output+=$(_test_curl_get "https://ssv.enp.one/api/alive" "Bitwarden")
output+="${NL}"
output+=$(_test_curl_head "https://cdn.enp.one/heartbeat" "Digital Ocean CDN")
output+="${NL}"
output+=$(_test_curl_head "https://doc.enp.one/" "Documentation")
output+="${NL}"
output+=$(_test_curl_head "https://enpaul.net/" "enpaul.net")
output+="${NL}"
output+=$(_test_curl_head "https://allaroundhere.org/" "allaroundhere.org")
output+="${NL}"
output+=$(_test_curl_head "https://enp.one/" "enp.one")
output+="${NL}"
column -e -t -s'~' <<<"$output"
}
function main() {
if [[ "$1" =~ ^(-h|--help)$ ]]; then
usage;
return 0
fi
if [[ "$1" = "--network" ]]; then
network;
return 0
fi
if [[ "$1" = "--services" ]]; then
services;
return 0
fi
}
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
main "${@}"
fi

View File

@@ -0,0 +1,28 @@
attrs==21.2.0; python_version >= "3.4" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.4"
bcrypt==3.2.0; python_version >= "3.6"
certifi==2021.5.30; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
cffi==1.14.6; python_version >= "3.6" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.4.0")
charset-normalizer==2.0.4; python_full_version >= "3.6.0" and python_version >= "3.6"
colorama==0.4.4; python_version >= "3.4" and python_full_version < "3.0.0" and sys_platform == "win32" or sys_platform == "win32" and python_version >= "3.4" and python_full_version >= "3.5.0"
cryptography==3.4.8; python_version >= "3.6"
distro==1.6.0; python_version >= "3.4" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.4"
docker-compose==1.29.2; python_version >= "3.4"
docker==5.0.2; python_version >= "3.6"
dockerpty==0.4.1; python_version >= "3.4"
docopt==0.6.2; python_version >= "3.4"
idna==3.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
jsondiff==1.3.0
jsonschema==3.2.0; python_version >= "3.4"
paramiko==2.7.2; python_version >= "3.6"
pycparser==2.20; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.4.0"
pynacl==1.4.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.4.0"
pyrsistent==0.18.0; python_version >= "3.6"
python-dotenv==0.19.0; python_version >= "3.5"
pywin32==227; sys_platform == "win32" and python_version >= "3.6"
pyyaml==5.4.1; python_version >= "3.4" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.4"
requests==2.26.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
selinux==0.2.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0")
six==1.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
texttable==1.6.4; python_version >= "3.4"
urllib3==1.26.6; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6"
websocket-client==0.59.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"

View File

@@ -0,0 +1,132 @@
---
- name: Check cluster state
hosts: cluster
any_errors_fatal: true
pre_tasks:
- name: Configure remote execution environment
ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
tasks:
- name: Validate user input
run_once: true
ansible.builtin.assert:
that:
- node is defined
- node in groups.cluster
fail_msg: >-
ERROR: Please set the 'node' variable to the cluster host to offline
(one of: {{ groups.cluster | join(', ') }})
- name: Fetch node swarm ID
ansible.builtin.command:
cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
changed_when: false
register: _docker_node_id_raw
- name: Fetch swarm node availability
ansible.builtin.command:
cmd: docker node inspect {{ _docker_node_id_raw.stdout.strip() }} --format '{{ '{{ .Spec.Availability}}' }}'
changed_when: false
register: _docker_node_availability_raw
- name: Set common facts
ansible.builtin.set_fact:
_target_node: "{{ node }}"
_docker_node_id: "{{ _docker_node_id_raw.stdout.strip() }}"
_docker_node_availability: "{{ _docker_node_availability_raw.stdout.strip() }}"
# Use the next host in the group, unless that would exceed the length of the group,
# in which case use the first host in the group
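# e.g. with groups.cluster == [pegasus, saturn, orion] (illustrative): node=saturn
# selects orion as the alternate, node=orion wraps around to pegasus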
_target_alt: >-
{{ groups.cluster[
lookup('ansible.utils.index_of', groups.cluster, 'eq', node) + 1
if (lookup('ansible.utils.index_of', groups.cluster, 'eq', node) + 1) < (groups.cluster | length)
else 0]
}}
# I'm not sure how to do this without invoking a loop, so here we are
- name: Set common fact for node addresses
vars:
_node_addresses:
- "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address }}"
ansible.builtin.set_fact:
_node_addresses: "{{ _node_addresses + [item.address] }}"
loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
loop_control:
label: "{{ item.address }}"
- name: Set facts for target node
when: inventory_hostname == _target_node
ansible.builtin.set_fact:
_needs_docker_migration: "{{ (_docker_node_availability | lower != 'drain') | bool }}"
- name: Check cluster settings
when: inventory_hostname != _target_node
ansible.builtin.assert:
that:
- skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
- _docker_node_availability | lower == 'active'
fail_msg: >-
ERROR: Node '{{ inventory_hostname }}' is already marked as unavailable. All cluster
nodes must be available before a new node can be moved to unavailable status.
- name: Offline node
hosts: "{{ node }}"
tasks:
- name: Migrate services off target node
when: _needs_docker_migration
block:
- name: Fetch current cluster service state
ansible.builtin.command:
cmd: !unsafe docker service ls --format '{{json .}}'
changed_when: false
register: _cluster_service_prestate
- name: Disable NAT rule {{ _skylab_adguard_nat_rule }}
delegate_to: core
connection: ansible.netcommon.network_cli
community.network.edgeos_config:
lines:
- set service nat rule {{ _skylab_adguard_nat_rule }} disable
- name: Update node availability
vars:
ansible_python_interpreter: "{{ skylab_state_dir }}/ansible-runtime/bin/python"
community.docker.docker_node:
availability: drain
hostname: "{{ _docker_node_id }}"
register: _node_availability_status
- name: Wait for services to shutdown
ansible.builtin.pause:
seconds: 10
- name: Wait for services to migrate
ansible.builtin.command:
cmd: !unsafe docker service ls --format '{{json .}}'
changed_when: false
register: _cluster_service_poststate
until: _cluster_service_poststate.stdout == _cluster_service_prestate.stdout
retries: 120
delay: 5
- name: Enable NAT rule {{ _skylab_adguard_nat_rule }}
delegate_to: core
connection: ansible.netcommon.network_cli
community.network.edgeos_config:
lines:
- delete service nat rule {{ _skylab_adguard_nat_rule }} disable
save: true
- name: Delete address from node
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
ansible.builtin.command:
cmd: ip address delete {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
changed_when: true
- name: Assign address to alt node
delegate_to: "{{ _target_alt }}"
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') not in hostvars[_target_alt]._node_addresses
ansible.builtin.command:
cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ hostvars[_target_alt].skylab_cluster.interface.access }}
changed_when: true
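# Example invocation (illustrative): ansible-playbook skylab.core.node_down -e node=saturn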

View File

@@ -0,0 +1,9 @@
---
- ansible.builtin.import_playbook: skylab.core.node_down
- name: Shutdown node
hosts: "{{ node }}"
tasks:
- name: Shutdown node
become: true
community.general.shutdown:

View File

@@ -0,0 +1,58 @@
---
- name: Online nodes
hosts: cluster
vars_prompt:
- name: skylab_datastore_encryption_password
prompt: Enter datastore block decryption password
private: true
pre_tasks:
- name: Configure remote execution environment
ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
roles:
- role: skylab.core.datastore
tasks:
- name: Fetch node swarm ID
ansible.builtin.command:
cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
changed_when: false
register: _docker_node_id_raw
- name: Update node availability
vars:
ansible_python_interpreter: "{{ skylab_state_dir }}/ansible-runtime/bin/python"
community.docker.docker_node:
availability: active
hostname: "{{ _docker_node_id_raw.stdout.strip() }}"
- name: Determine node addresses
vars:
_node_addresses:
- "{{ (lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address + '/' + lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"
ansible.builtin.set_fact:
_node_addresses: "{{ _node_addresses + [(item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix')] }}"
loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
loop_control:
label: "{{ (item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"
- name: Determine cluster access addresses
run_once: true
vars:
_cluster_node_ips: []
ansible.builtin.set_fact:
_cluster_node_ips: "{{ _cluster_node_ips + [hostvars[item].skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix')] }}"
loop: "{{ groups.cluster }}"
- name: Remove alternative node IPs
become: true
when: item in _cluster_node_ips and item != (skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix'))
ansible.builtin.command:
cmd: ip address delete {{ item | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
changed_when: true
loop: "{{ _node_addresses }}"
- name: Add node IP
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') not in _node_addresses
ansible.builtin.command:
cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
changed_when: true

View File

@@ -0,0 +1,56 @@
---
- name: Group hosts by platform
hosts: all
tags:
- always
pre_tasks:
- include_tasks: tasks/meta/runtime-group-determination.yaml
- name: Bootstrap remote ansible environment
hosts: linux
gather_facts: false
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
- name: Configure common settings
hosts: linux
gather_facts: false
tasks:
- name: Set hostname
become: true
ansible.builtin.hostname:
name: "{{ skylab_hostname | default(inventory_hostname) }}"
use: systemd
- name: Disable case-sensitive tab-completion
become: true
ansible.builtin.lineinfile:
line: set completion-ignore-case On
path: /etc/inputrc
state: present
create: true
- name: Install EPEL repository config
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.yum_repository:
name: epel
description: Extra Packages for Enterprise Linux
baseurl: https://download.fedoraproject.org/pub/epel/$releasever{{ '/Everything' if ansible_distribution_major_version == '8' else '' }}/$basearch/
- name: Install EPEL GPG key
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.rpm_key:
state: present
key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}
- import_playbook: skylab.core.update
- import_playbook: skylab.core.configure

View File

@@ -0,0 +1,53 @@
---
- name: Install Python system bindings
when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
state: present
name:
- libffi-devel
- python3-devel
- python3-libselinux
- python3-policycoreutils
- python3-firewall
- name: Remove legacy state directory
become: true
ansible.builtin.file:
path: /var/run/skylab
state: absent
- name: Create state directory
become: true
ansible.builtin.file:
path: "{{ skylab_state_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0755
- name: Create bootstrap virtualenv
ansible.builtin.command:
cmd: "{{ ansible_python_interpeter | default(discovered_interpreter_python) }} -m venv {{ skylab_ansible_venv }} --system-site-packages"
creates: "{{ skylab_ansible_venv }}/bin/python"
- name: Pin bootstrap virtualenv pip
ansible.builtin.pip:
executable: "{{ skylab_ansible_venv }}/bin/pip"
name: pip
state: present
version: "{{ skylab_pip_version }}"
- name: Copy requirements file to remote
ansible.builtin.copy:
src: remote-requirements.txt
dest: "{{ skylab_ansible_venv }}/requirements.txt"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0644
- name: Install remote requirements
ansible.builtin.pip:
executable: "{{ skylab_ansible_venv }}/bin/pip"
requirements: "{{ skylab_ansible_venv }}/requirements.txt"
state: present

View File

@@ -0,0 +1,12 @@
---
- name: Group EdgeOS hosts
when: ansible_kernel.endswith('UBNT')
changed_when: false
group_by:
key: edgeos
- name: Group supported Linux hosts
when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
changed_when: false
group_by:
key: linux

View File

@@ -0,0 +1,53 @@
---
version: "{{ skylab_compose_version }}"
networks:
adguard:
name: adguard
driver: overlay
ipam:
driver: default
config:
- subnet: "{{ app.networks.ext }}"
volumes:
{% for key, value in app.volumes.items() %}
adguard-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: adguard/adguardhome:{{ app.versions.server }}
hostname: adguard
networks:
- adguard
dns:
{% for server in app.settings.upstream %}
- {{ server }}
{% endfor %}
ports:
- published: {{ app.ports.53 }}
target: 53
protocol: udp
mode: ingress
- published: {{ app.ports.53 }}
target: 53
protocol: tcp
mode: ingress
- published: {{ app.ports.8064 }}
target: 8064
protocol: tcp
mode: ingress
volumes:
- type: volume
source: adguard-config
target: /opt/adguardhome/conf
read_only: false
- type: volume
source: adguard-data
target: /opt/adguardhome/work
read_only: false
deploy:
replicas: 1

View File

@@ -0,0 +1,214 @@
---
version: "{{ skylab_compose_version }}"
x-global-env: &globalenv
LOCAL_UID: "{{ _app_account.uid }}"
LOCAL_GID: "{{ _app_account.uid }}"
ASPNETCORE_ENVIRONMENT: Production
globalSettings__selfHosted: "true"
globalSettings__baseServiceUri__vault: https://{{ app.publish.domain }}
globalSettings__baseServiceUri__api: https://{{ app.publish.domain }}/api
globalSettings__baseServiceUri__identity: https://{{ app.publish.domain }}/identity
globalSettings__baseServiceUri__admin: https://{{ app.publish.domain }}/admin
globalSettings__baseServiceUri__notifications: https://{{ app.publish.domain }}/notifications
globalSettings__baseServiceUri__internalNotifications: http://bitwarden_notifications:5000
globalSettings__baseServiceUri__internalAdmin: http://bitwarden_admin:5000
globalSettings__baseServiceUri__internalIdentity: http://bitwarden_identity:5000
globalSettings__baseServiceUri__internalApi: http://bitwarden_api:5000
globalSettings__baseServiceUri__internalVault: http://bitwarden_web:5000
globalSettings__pushRelayBaseUri: https://push.bitwarden.com
globalSettings__installation__identityUri: https://identity.bitwarden.com
globalSettings__sqlServer__connectionString: "Data Source=tcp:mssql,1433;Initial Catalog=vault;Persist Security Info=False;User ID=sa;Password=e934c0bb-3b5a-4e6b-b525-cd6d83004e1a;MultipleActiveResultSets=False;Connect Timeout=30;Encrypt=True;TrustServerCertificate=True"
globalSettings__identityServer__certificatePassword: {{ app.settings.certificatePassword }}
globalSettings__attachment__baseDirectory: /etc/bitwarden/core/attachments
globalSettings__attachment__baseUrl: https://{{ app.publish.domain }}/attachments
globalSettings__dataProtection__directory: /etc/bitwarden/core/aspnet-dataprotection
globalSettings__logDirectory: /etc/bitwarden/logs
globalSettings__licenseDirectory: /etc/bitwarden/core/licenses
globalSettings__internalIdentityKey: {{ app.settings.internalIdentityKey }}
globalSettings__duo__aKey: {{ app.settings.duo__aKey }}
globalSettings__installation__id: {{ app.settings.installation__id }}
globalSettings__installation__key: {{ app.settings.installation__key }}
globalSettings__yubico__clientId: REPLACE
globalSettings__yubico__key: REPLACE
globalSettings__mail__replyToEmail: noreply@enp.one
globalSettings__mail__smtp__host: REPLACE
globalSettings__mail__smtp__port: "587"
globalSettings__mail__smtp__ssl: "false"
globalSettings__mail__smtp__username: REPLACE
globalSettings__mail__smtp__password: REPLACE
globalSettings__disableUserRegistration: "false"
globalSettings__hibpApiKey: REPLACE
adminSettings__admins: ""
volumes:
{% for key, value in app.volumes.items() %}
bitwarden-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
networks:
bitwarden_internal:
internal: true
name: bitwarden_internal
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.internal }}
bitwarden_external:
internal: false
name: bitwarden_external
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.external }}
services:
mssql:
image: bitwarden/mssql:{{ app.versions.mssql }}
stop_grace_period: 60s
networks:
- bitwarden_internal
volumes:
- bitwarden-db-data:/var/opt/mssql/data
- bitwarden-db-backup:/etc/bitwarden/mssql/backups
- bitwarden-logs-db:/var/opt/mssql/log
environment:
LOCAL_UID: "{{ _app_account.uid }}"
LOCAL_GID: "{{ _app_account.uid }}"
ACCEPT_EULA: "Y"
MSSQL_PID: Express
SA_PASSWORD: {{ app.settings.SA_PASSWORD }}
deploy:
replicas: 1
web:
image: bitwarden/web:{{ app.versions.web }}
networks:
- bitwarden_internal
volumes:
- bitwarden-web:/etc/bitwarden/web
environment: *globalenv
deploy:
replicas: 1
attachments:
image: bitwarden/attachments:{{ app.versions.attachments }}
networks:
- bitwarden_internal
volumes:
- bitwarden-core:/etc/bitwarden/core
environment: *globalenv
deploy:
replicas: 1
api:
image: bitwarden/api:{{ app.versions.api }}
volumes:
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-api:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
identity:
image: bitwarden/identity:{{ app.versions.identity }}
volumes:
- bitwarden-identity:/etc/bitwarden/identity
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-identity:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
admin:
image: bitwarden/admin:{{ app.versions.admin }}
depends_on:
- mssql
volumes:
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-admin:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
icons:
image: bitwarden/icons:{{ app.versions.icons }}
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-icons:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
notifications:
image: bitwarden/notifications:1.40.0
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-notifications:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
events:
image: bitwarden/events:{{ app.versions.events }}
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-events:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
nginx:
image: bitwarden/nginx:{{ app.versions.nginx }}
depends_on:
- web
- admin
- api
- identity
ports:
- published: {{ app.ports.8080 }}
target: 8080
protocol: tcp
mode: ingress
- published: {{ app.ports.8443 }}
target: 8443
protocol: tcp
mode: ingress
volumes:
- bitwarden-nginx-data:/etc/bitwarden/nginx
- bitwarden-ssl:/etc/ssl
- bitwarden-logs-nginx:/var/log/nginx
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1

View File

@@ -0,0 +1,52 @@
---
version: "{{ skylab_compose_version }}"
networks:
gitea:
name: gitea
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
gitea-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: gitea/gitea:{{ app.versions.server }}
hostname: gitea
networks:
- gitea
ports:
- published: {{ app.ports.3000 }}
target: 3000
protocol: tcp
mode: ingress
- published: {{ app.ports.22 }}
target: 22
protocol: tcp
mode: ingress
volumes:
- type: volume
source: gitea-data
target: /data
read_only: false
environment:
USER_UID: "{{ _app_account.uid }}"
USER_GID: "{{ _app_account.uid }}"
APP_NAME: ENP Version Control System
RUN_MODE: prod
ROOT_URL: https://{{ app.publish.domain }}/
DB_TYPE: sqlite3
DISABLE_REGISTRATION: "true"
deploy:
replicas: 1

View File

@@ -0,0 +1,99 @@
---
version: "{{ skylab_compose_version }}"
networks:
meta:
name: meta
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
meta-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
proxy:
image: nginx:{{ app.versions.proxy }}
hostname: proxy
networks:
- meta
extra_hosts:
- "dockerloopback:{{ app.settings.loopback_address }}"
ports:
- published: {{ app.ports.80 }}
target: 80
protocol: tcp
mode: ingress
- published: {{ app.ports.443 }}
target: 443
protocol: tcp
mode: ingress
volumes:
- type: volume
source: meta-nginx
target: /etc/nginx
read_only: true
- type: volume
source: meta-letsencrypt-config
target: /etc/letsencrypt
read_only: true
deploy:
replicas: 2
placement:
max_replicas_per_node: 1
certbot:
image: certbot/certbot:{{ app.versions.certbot }}
hostname: certbot
command: renew --standalone
networks:
- meta
ports:
- published: 8088 # This is hardcoded to avoid conflicts
target: 80
protocol: tcp
mode: ingress
volumes:
- type: volume
source: meta-letsencrypt-config
target: /etc/letsencrypt
read_only: false
- type: volume
source: meta-letsencrypt-data
target: /var/lib/letsencrypt
read_only: false
deploy:
replicas: 1
restart_policy:
condition: any
delay: 24h
backup:
image: rockylinux:latest
hostname: backup
command: bash /datastore/backup/mkbkup.sh /datastore/
networks:
- meta
volumes:
- type: volume
source: meta-backup
target: /datastore/backup
read_only: false
- type: volume
source: meta-appdata
target: /datastore/appdata
read_only: true
deploy:
replicas: 1
restart_policy:
condition: any
delay: 24h

View File

@@ -0,0 +1,55 @@
---
version: "{{ skylab_compose_version }}"
networks:
minecraft:
name: minecraft
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
minecraft-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: itzg/minecraft-server:latest
hostname: minecraft
networks:
- minecraft
ports:
- published: {{ app.ports.25565 }}
target: 25565
protocol: tcp
mode: ingress
volumes:
- type: volume
source: minecraft-data
target: /data
read_only: false
environment:
EULA: "TRUE"
TZ: America/New_York
VERSION: {{ app.versions.server }}
MAX_MEMORY: "8G"
MOTD: "A home for buttery companions"
MODE: survival
OPS: {{ app.settings.admins | default([]) | join(',') }}
WHITELIST: "{{ app.settings.users | default([]) | join(',') }}"
MAX_BUILD_HEIGHT: "512"
SNOOPER_ENABLED: "false"
ICON: https://cdn.enp.one/img/logos/e-w-sm.png
ENABLE_RCON: "false"
UID: "{{ _app_account.uid }}"
GID: "{{ _app_account.uid }}"
deploy:
replicas: 1

View File

@@ -0,0 +1,113 @@
---
version: '3.7'
volumes:
photoprism-database:
name: datastore/appdata/photoprism/database
driver: glusterfs
photoprism-metadata:
name: datastore/appdata/photoprism/metadata
driver: glusterfs
photoprism-originals:
name: datastore/media/photoprism
driver: glusterfs
photoprism-import:
name: datastore/media/upload
driver: glusterfs
networks:
photoprism:
internal: true
name: photoprism
driver: overlay
ipam:
driver: default
config:
- subnet: 192.168.109.0/24
services:
app:
image: photoprism/photoprism:latest
hostname: app
depends_on:
- database
networks:
- photoprism
ports:
- published: 2342
target: 2342
protocol: tcp
mode: ingress
environment:
PHOTOPRISM_ADMIN_PASSWORD: "gm2auW34GNawZ8Dqiub8W8vOlvsHCnfj"
PHOTOPRISM_SITE_URL: "http://cluster.skylab.enp.one:2342/"
PHOTOPRISM_ORIGINALS_LIMIT: 5000
PHOTOPRISM_HTTP_COMPRESSION: "gzip"
PHOTOPRISM_DEBUG: "false"
PHOTOPRISM_PUBLIC: "false"
PHOTOPRISM_READONLY: "false"
PHOTOPRISM_EXPERIMENTAL: "false"
PHOTOPRISM_DISABLE_CHOWN: "false"
PHOTOPRISM_DISABLE_WEBDAV: "false"
PHOTOPRISM_DISABLE_SETTINGS: "false"
PHOTOPRISM_DISABLE_TENSORFLOW: "false"
PHOTOPRISM_DISABLE_FACES: "false"
PHOTOPRISM_DISABLE_CLASSIFICATION: "false"
PHOTOPRISM_DARKTABLE_PRESETS: "false"
PHOTOPRISM_DETECT_NSFW: "false"
PHOTOPRISM_UPLOAD_NSFW: "true"
PHOTOPRISM_DATABASE_DRIVER: "mysql"
PHOTOPRISM_DATABASE_SERVER: "database:3306"
PHOTOPRISM_DATABASE_NAME: "photoprism"
PHOTOPRISM_DATABASE_USER: "photoprism"
PHOTOPRISM_DATABASE_PASSWORD: "KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp" # MariaDB or MySQL database user password
PHOTOPRISM_SITE_TITLE: "Skylab Images"
PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
PHOTOPRISM_SITE_DESCRIPTION: ""
PHOTOPRISM_SITE_AUTHOR: "EN Paul"
HOME: "/photoprism"
PHOTOPRISM_UID: 1408
PHOTOPRISM_GID: 1408
## Hardware video transcoding config (optional)
# PHOTOPRISM_FFMPEG_BUFFERS: "64" # FFmpeg capture buffers (default: 32)
# PHOTOPRISM_FFMPEG_BITRATE: "32" # FFmpeg encoding bitrate limit in Mbit/s (default: 50)
# PHOTOPRISM_FFMPEG_ENCODER: "h264_v4l2m2m" # Use Video4Linux for AVC transcoding (default: libx264)
# PHOTOPRISM_FFMPEG_ENCODER: "h264_qsv" # Use Intel Quick Sync Video for AVC transcoding (default: libx264)
# PHOTOPRISM_INIT: "intel-graphics tensorflow-amd64-avx2" # Enable TensorFlow AVX2 & Intel Graphics support
## Enable TensorFlow AVX2 support for modern Intel CPUs (requires starting the container as root)
# PHOTOPRISM_INIT: "tensorflow-amd64-avx2"
user: "1408:1408"
working_dir: "/photoprism"
volumes:
- type: volume
source: photoprism-originals
target: /photoprism/originals
read_only: false
- type: volume
source: photoprism-metadata
target: /photoprism/storage
read_only: false
- type: volume
source: photoprism-import
target: /photoprism/import
read_only: true
deploy:
replicas: 1
database:
image: mariadb:10.6
hostname: database
command: mysqld --innodb-buffer-pool-size=128M --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=120
networks:
- photoprism
volumes:
- type: volume
source: photoprism-database
target: /var/lib/mysql
read_only: false
environment:
MYSQL_ROOT_PASSWORD: insecure
MYSQL_DATABASE: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp
deploy:
replicas: 1

View File

@@ -0,0 +1,95 @@
---
version: "{{ skylab_compose_version }}"
networks:
plex:
name: plex
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
plex-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
plex-data:
name: plex-data
driver: local
driver_opts:
type: nfs
o: "addr={{ app.settings.mediastore }},ro"
device: ":/nfs/plex"
services:
server:
image: plexinc/pms-docker:{{ app.versions.server }}
hostname: plex-media-server
networks:
- plex
ports:
- published: {{ app.ports.32400 }}
target: 32400
protocol: tcp
mode: ingress
- published: {{ app.ports.3005 }}
target: 3005
protocol: tcp
mode: ingress
- published: {{ app.ports.8324 }}
target: 8324
protocol: tcp
mode: ingress
- published: {{ app.ports.32469 }}
target: 32469
protocol: tcp
mode: ingress
- published: {{ app.ports.1900 }}
target: 1900
protocol: udp
mode: ingress
- published: {{ app.ports.32410 }}
target: 32410
protocol: udp
mode: ingress
- published: {{ app.ports.32413 }}
target: 32413
protocol: udp
mode: ingress
- published: {{ app.ports.32414 }}
target: 32414
protocol: udp
mode: ingress
volumes:
- type: volume
source: plex-config
target: /config
read_only: false
- type: volume
source: plex-data
target: /data
read_only: true
- type: volume
source: plex-personal
target: /personal
read_only: false
environment:
TZ: "Americas/New_York"
ALLOWED_NETWORKS: {{ app.settings.internal_subnets | join(',') }}
PLEX_UID: "{{ _app_account.uid }}"
PLEX_GID: "{{ _app_account.uid }}"
deploy:
replicas: 1
placement:
{% if app.settings.exclude_hosts is defined %}
constraints:
{% for host in app.settings.exclude_hosts %}
- node.hostname!={{ host }}
{% endfor %}
{% endif %}

View File

@@ -0,0 +1,70 @@
---
version: "{{ skylab_compose_version }}"
networks:
unifi:
name: unifi
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
unifi-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
wlc:
image: jacobalberty/unifi:{{ app.versions.wlc }}
hostname: en1-unifi-wlc
init: true
networks:
- unifi
ports:
- published: {{ app.ports.8080 }}
target: 8080
protocol: tcp
mode: ingress
- published: {{ app.ports.8443 }}
target: 8443
protocol: tcp
mode: ingress
- published: {{ app.ports.8843 }}
target: 8843
protocol: tcp
mode: ingress
- published: {{ app.ports.8880 }}
target: 8880
protocol: tcp
mode: ingress
- published: {{ app.ports.3478 }}
target: 3478
protocol: udp
mode: ingress
- published: {{ app.ports.6789 }}
target: 6789
protocol: tcp
mode: ingress
- published: {{ app.ports.10001 }}
target: 10001
protocol: udp
mode: ingress
volumes:
- type: volume
source: unifi-data
target: /unifi
read_only: false
environment:
RUNAS_UID0: "false"
UNIFI_UID: "{{ _app_account.uid }}"
UNIFI_GID: "{{ _app_account.uid }}"
TZ: "Americas/New_York"
deploy:
replicas: 1

View File

@@ -0,0 +1,108 @@
---
version: "{{ skylab_compose_version }}"
networks:
vikunja:
name: vikunja
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
vikunja-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
database:
image: mariadb:{{ app.versions.database }}
hostname: database
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
networks:
- vikunja
volumes:
- type: volume
source: vikunja-database
target: /var/lib/mysql
read_only: false
environment:
MYSQL_RANDOM_ROOT_PASSWORD: "true"
MYSQL_USER: vikunja
MYSQL_PASSWORD: {{ app.settings.database_password }}
MYSQL_DATABASE: vikunja
deploy:
replicas: 1
cache:
image: redis:{{ app.versions.cache }}
hostname: cache
networks:
- vikunja
deploy:
replicas: 1
proxy:
image: nginx:{{ app.versions.proxy }}
hostname: proxy
networks:
- vikunja
ports:
- published: {{ app.ports.80 }}
target: 80
protocol: tcp
mode: ingress
volumes:
- type: volume
source: vikunja-nginx
target: /etc/nginx/conf.d
read_only: true
deploy:
replicas: 1
api:
image: vikunja/api:{{ app.versions.api }}
hostname: api
networks:
- vikunja
depends_on:
- database
- cache
volumes:
- type: volume
source: vikunja-files
target: /app/vikunja/files
read_only: false
environment:
VIKUNJA_DATABASE_HOST: database
VIKUNJA_DATABASE_PASSWORD: {{ app.settings.database_password }}
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_REDIS_ENABLED: "1"
VIKUNJA_REDIS_HOST: cache:6379
VIKUNJA_CACHE_ENABLED: "1"
VIKUNJA_CACHE_TYPE: redis
VIKUNJA_FILES_MAXSIZE: 50MB
deploy:
replicas: 1
web:
image: vikunja/frontend:{{ app.versions.web }}
hostname: web
networks:
- vikunja
depends_on:
- database
- cache
- proxy
environment:
VIKUNJA_API_URL: https://{{ app.publish.domain }}/api/v1
deploy:
replicas: 1

View File

@@ -0,0 +1,34 @@
# Ansible managed file - do not manually edit
#
server {
server_name {{ app.publish.domain }};
root /usr/share/nginx/html;
location / {
proxy_pass http://dockerloopback:{{ app.publish.http }}/;
proxy_set_header Host $host;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/{{ app.publish.domain }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ app.publish.domain }}/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
}
server {
listen 80;
listen [::]:80;
server_name {{ app.publish.domain }};
location ^~ /.well-known/acme-challenge/ {
proxy_pass http://dockerloopback:8088/.well-known/acme-challenge/;
proxy_set_header Host $host;
}
location / {
return 301 https://$host$request_uri;
}
}
# EOF

View File

@@ -0,0 +1,224 @@
---
- name: Group hosts by platform
hosts: all
tags:
- always
pre_tasks:
- include_tasks: tasks/meta/runtime-group-determination.yaml
- name: Bootstrap remote ansible environment
hosts: linux
gather_facts: false
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
- name: Update system
hosts: linux
tags:
- packages
vars_files:
- vars/packages.yaml
tasks:
- name: Update system packages via DNF
when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name: "*"
state: latest
- name: Install global bash config
become: true
ansible.builtin.copy:
src: global.sh
dest: /etc/profile.d/ZZ-skylab-global.sh
owner: root
group: "{{ ansible_user }}"
mode: 0644
- name: Install universal packages on Rocky
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.dnf:
name: "{{ skylab_packages_global + skylab_packages_rocky }}"
state: present
update_cache: true
- name: Install universal packages on Fedora
when: ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name: "{{ skylab_packages_global + skylab_packages_fedora }}"
state: present
update_cache: true
- name: Update unix accounts
hosts: linux
tags:
- accounts
- access
vars_files:
- vars/access.yaml
- vars/sshkeys.yaml
tasks:
- name: Create management groups
become: true
ansible.builtin.group:
name: "{{ item.name }}"
gid: "{{ item.gid }}"
state: present
loop:
- "{{ skylab_group }}"
- "{{ skylab_group_admin }}"
- "{{ skylab_group_automation }}"
loop_control:
label: "{{ item.name }},{{ item.gid }}"
- name: Determine existing skylab users
changed_when: false
ansible.builtin.shell:
cmd: 'grep "{{ skylab_group.name }}:" /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
register: _existing_skylab_accounts
- name: Determine deleted skylab users
vars:
_deleted_accounts: []
when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
ansible.builtin.set_fact:
_deleted_accounts: "{{ _deleted_accounts + [item] }}"
loop: "{{ _existing_skylab_accounts.stdout_lines }}"
- name: Delete accounts
when: _deleted_accounts | default(false)
block:
- name: Delete removed user accounts
become: true
ansible.builtin.user:
name: "{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Delete removed user groups
become: true
ansible.builtin.group:
name: "{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Delete removed user home directories
become: true
ansible.builtin.file:
path: "/home/{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Determine active users
when: item.targets | default([]) | intersect(skylab_targets)
vars:
_active_accounts: []
ansible.builtin.set_fact:
_active_accounts: "{{ _active_accounts + [item] }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create account groups
become: true
ansible.builtin.group:
name: "{{ item.name }}"
gid: "{{ item.uid }}"
state: present
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Determine account groups
ansible.builtin.set_fact:
_determined_member_groups: "{{ _determined_member_groups | default({}) | combine({item.name: [
skylab_group.name,
'wheel' if (item.admin | default(false) and ansible_os_family == 'RedHat') else '',
'sudo' if (item.admin | default(false) and ansible_os_family == 'Debian') else '',
skylab_group_admin.name if item.admin | default(false) else '',
skylab_group_automation.name if item.service | default(false) else '',
]}) }}"
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create accounts
become: true
ansible.builtin.user:
name: "{{ item.name }}"
state: present
uid: "{{ item.uid }}"
group: "{{ item.name }}"
groups: "{{ _determined_member_groups[item.name] }}"
comment: "{{ item.fullname | default('') }}"
system: "{{ item.service | default(false) }}"
generate_ssh_key: true
ssh_key_bits: 4096
ssh_key_passphrase: "{{ item.password }}"
ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
ssh_key_type: ed25519
password: "{{ item.password }}"
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Ensure proper ownership of user home directories
become: true
ansible.builtin.file:
path: /home/{{ item.name }}
state: directory
group: "{{ item.name }}"
owner: "{{ item.name }}"
mode: 0700
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create SSH directory
become: true
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh
owner: "{{ item.name }}"
group: "{{ item.name }}"
state: directory
mode: 0700
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Update authorized keys
become: true
ansible.builtin.authorized_key:
user: "{{ item.name }}"
key: "{{ skylab_ssh_keys[item.name] | join('\n') }}"
state: present
exclusive: true
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce ownership of authorized keys
become: true
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh/authorized_keys
state: file
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: 0400
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce root password
become: true
ansible.builtin.user:
name: root
password: "{{ skylab_root_password }}"
state: present

View File

@@ -0,0 +1,64 @@
---
skylab_accounts:
- name: enpaul
uid: 1300
fullname: Ethan N. Paul
targets: [network, datastore, cluster, cloud, workstation]
admin: true
password: $6$H7jZEL2Ey93zfMTD$CzUlZkXDudPHgUMU/OFUn8/Yhzo8nBxoSI8thD15toIFlWN.iUfq/Jp5z3KpDCGTxyv.IbRTvE8dOVWjoRfgJ.
- name: ansible
uid: 1400
targets: [network, datastore, cluster, cloud, workstation]
admin: true
service: true
password: $6$qNKmYg4y9YS4f5Gr$m0mAqEVbymPguj.1cS.pfclt33Okfmn1KhFC0r1iQ3eVvz/OIZY3x0qGmPnJ1zOXDWyKKs5hnlGTAeZgCh49C.
- name: autocraft
uid: 1401
service: true
password: $6$j8aWjPweCWmNT4cZ$F7puAjVES60a5mkBg1WfYMYIGbLrs8FFowf5BPzVo9qgbMYEC53i8rHezDACZjHmOxj5PhJkmZjHD4vfbf3PC1
- name: autotea
uid: 1402
service: true
password: $6$NdUiEi2P8TI.2jgb$ai1WbWno8QK6Wg/KAV4IacwG92FctN5aQX2i45a9DSCg8k1nkwGj5gc230FXePh8P7vzQ6ifYrYsAvEfZ1J8B.
- name: autowarden
uid: 1403
service: true
password: $6$a18IBPw40.ELiuy5$RbGfNGUe9iTA6bOaKLsp7q3X3uQ3D1LP8LAF5ioQAafimVvCtwyG.O4Colo9nsl2yoCF6TMIHX9zOTavkbg7W0
- name: autoguard
uid: 1404
service: true
password: $6$PLf4ifrrk0/5AF21$ohZXy0xDzyUiHXwoCW8zvbrPHFiWSWxYx2.QlDy09wND7RbPhwxghHS0trWWVdy14jAxU45mz5YvdAl7qmEIO0
- name: autoplex
uid: 1405
service: true
password: $6$VfMCenzm5UPHrpNN$yQIpnQUZPhO9UoSylaOxR6TOrJfR5dFdzdRFzle7dP/bfeDBKxC6hsy52IEowilL.aCbYevz67R9s1hB3q9GU1
- name: autounifi
uid: 1406
service: true
password: $6$ScrKQGmAifGVDovx$wuQQhEdNya8Tjj/oTeY/eT1grLl17hSYbVeWIIqU.jv.W9vFyoVkxeE/lBcPvBe8XdGjOxWu32WsnomL8irb11
- name: autollama
uid: 1407
service: true
password: $6$lEdCBbqlWIdHuRZZ$Pr9SAybk7uCTfzjtCpSe7RrwM2TKqr8vWtLDARZRQ9e1RpNKHP2bEvkeg2VPc7oACVfxbg7Y8PP0rKOR.3fcD.
skylab_group:
name: skylab
gid: 1200
skylab_group_admin:
name: skylab_admin
gid: 1201
skylab_group_automation:
name: skylab_auto
gid: 1202
skylab_root_password: $6$FDwVi2DUVPg.LSrC$vRMIW6ah0x5cSZFLDrV2FuiwoUtYgcnJJV06gn2HxLsUnkXJ0/Sv1hjRn8v6bZy1AmkDCyQCtT6DHRRBuQspx.

View File

@@ -0,0 +1,35 @@
---
skylab_packages_global:
- automake
- cmake
- curl
- gcc
- gcc-c++
- git
- jq
- make
- nano
- openssl-devel
- openssh-server
- systemd-devel
- unzip
- vim
- vim-minimal
skylab_packages_rocky:
- bind-utils
- bash-completion
- nc
- nfs-utils
- python3
- python3-pip
- python3-setuptools
- python3-virtualenv
- systemd-networkd
- wget
skylab_packages_fedora:
- bind-utils
- nc
- nfs-utils
- wget

View File

@@ -0,0 +1,240 @@
---
skylab_services:
meta:
networks:
ext: 192.168.99.0/24
volumes:
nginx: /appdata/nginx
letsencrypt-config: /appdata/letsencrypt/config
letsencrypt-data: /appdata/letsencrypt/data
ports:
80: 80
443: 443
versions:
proxy: latest
certbot: latest
settings:
loopback_address: 192.168.255.255
minecraft:
user: autocraft
networks:
ext: 192.168.102.0/24
volumes:
data: /appdata/minecraft
ports:
25565: 25565
versions:
server: 1.16.5
publish:
domain: mcs.enp.one
settings:
admins:
- ScifiGeek42
users:
- ScifiGeek42
- fantasycat256
- CoffeePug
- Snowdude21325
- KaiserSJR
- glutenfreebean
gitea:
user: autotea
networks:
ext: 192.168.103.0/24
volumes:
data: /appdata/gitea
ports:
3000: 3000
22: 2222
publish:
domain: vcs.enp.one
http: 3000
versions:
server: 1.15.4
bitwarden:
user: autowarden
networks:
internal: 192.168.104.0/24
external: 192.168.105.0/24
volumes:
db-data: /appdata/bitwarden/mssql/data
db-backup: /appdata/bitwarden/mssql/backup
nginx-data: /appdata/bitwarden/nginx
web: /appdata/bitwarden/web
ssl: /appdata/bitwarden/ssl
ca-certs: /appdata/bitwarden/ca-certificates
core: /appdata/bitwarden/core
identity: /appdata/bitwarden/identity
logs-api: /appdata/bitwarden/logs/api
logs-db: /appdata/bitwarden/logs/mssql
logs-identity: /appdata/bitwarden/logs/identity
logs-nginx: /appdata/bitwarden/logs/nginx
logs-admin: /appdata/bitwarden/logs/admin
logs-icons: /appdata/bitwarden/logs/icons
logs-notifications: /appdata/bitwarden/logs/notifications
logs-events: /appdata/bitwarden/logs/events
ports:
8080: 8090
8443: 8943
versions:
mssql: 1.40.0
web: 2.19.0
attachments: 1.40.0
api: 1.40.0
identity: 1.40.0
admin: 1.40.0
icons: 1.40.0
events: 1.40.0
nginx: 1.40.0
publish:
domain: ssv.enp.one
http: 8090
settings:
certificatePassword: !vault |
$ANSIBLE_VAULT;1.1;AES256
34336462333965626665636664636338353139306135393862656539623935666134666638313632
6337393734353237373233663763666566316637393436650a346134353365626637313732346565
64373866633430613637663230383866336362313739313335646330373666353536396463376364
3730306338623831300a346565613730326138333732306237333236393237653363386263376531
30653663346234383538316337386534356534316437323561646637636361396462393335316233
3931623037626539636535353963666635316334613833396437
internalIdentityKey: !vault |
$ANSIBLE_VAULT;1.1;AES256
64343365323264303635306461386464626535343138333637333035343365386138363261666561
3036376532316230326238626662663434343131393336350a363230333637373231333332356230
66383466626139396365333865663538386130633136643861353936613330613535313363323639
6538656632376330380a373534393361613234366536353866353366646263643565346534393235
30623261626364613063353839663130656436316531666431316332653330636436323331316462
3539383064363338313433343837363563313838333231363639
duo__aKey: !vault |
$ANSIBLE_VAULT;1.1;AES256
38353861643436373461393663616366383139393164366664303333333431663364613530323532
3434643335353964613464393734623934313164663339340a303831353734623332316464333735
34343961393562366435653935313038336638623061353761343538333264386638306363386463
3339346561333039650a353163633263386232646366323637383866303033356631376639383561
36316333336434393364316565353363623036613233633933616532376530653138366432303762
6532343435636261353434323461646365396538646466353032
installation__id: !vault |
$ANSIBLE_VAULT;1.1;AES256
62323837366638363735393462326566633235356261326636623239366462316465636163663063
3065613765386138653239383332306363346236666662660a313634333334396633646465356435
66666231633938613838663364323331666434383439303931393761313563663931386532336330
6433383331643933610a323565636462663865666435376334346535323964663264363039623364
32653966363634376534383664663535373830366466336463613365653463363663316165303330
3834653063653334313931643330663163386638363938643130
installation__key: !vault |
$ANSIBLE_VAULT;1.1;AES256
38353130336136623437653131316461653561393539373630623135383036643135623361613735
6431306133623866613836363361376163656434343230660a663635393861333863376461336661
30386562353730326665323030393531663234373430363639306562633031363065386665646431
3163633239366630300a313436386131376433333231346135393735373236626365393533626232
61313536323437363234396536623662613434333363326565303939363562353732
SA_PASSWORD: !vault |
$ANSIBLE_VAULT;1.1;AES256
64313236346631366338313139396532346461333835616466313037363132656632323566663138
6665393239656262363261303362303437343438626234340a663836623362353431373035356562
61383865303835323336363862303035363161376336346563323966633361333966363232393665
6166323331353065380a616138303531643063653633656561383761393433646130656432363436
62383763316130306235396338356236636263653830666139663064626633643635386237373034
3465323836373437383465316537666337373134616135626238
adguard:
user: autoguard
networks:
ext: 192.168.108.0/24
volumes:
config: /appdata/adguard/config
data: /appdata/adguard/data
ports:
53: 53
8064: 8084
versions:
server: v0.106.3
publish:
domain: adguard.en1.local
http: 8064
settings:
upstream:
- 1.1.1.1
- 1.0.0.1
plex:
user: autoplex
networks:
ext: 192.168.101.0/24
volumes:
config: /appdata/plex
ports:
32400: 32400
3005: 3005
8324: 8324
32469: 32469
1900: 1900
32410: 32410
32413: 32413
32414: 32414
versions:
server: latest
publish:
domain: pms.enp.one
http: 32400
settings:
mediastore: mediastore.skylab.enp.one
internal_subnets:
- 10.42.100.0/24
- 10.42.101.0/24
exclude_hosts:
- jupiter.net.enp.one
- pegasus.skylab.enp.one
unifi:
user: autounifi
networks:
ext: 192.168.100.0/24
volumes:
data: /appdata/unifi
ports:
8080: 8080
8443: 8443
8843: 8843
8880: 8880
3478: 3478
6789: 6789
10001: 10001
versions:
wlc: "6.2"
publish:
domain: unifi.en1.local
http: 8080
vikunja:
user: autollama
networks:
ext: 192.168.107.0/24
volumes:
database: /appdata/vikunja/database
files: /appdata/vikunja/files
nginx: /appdata/vikunja/nginx
ports:
80: 8087
versions:
database: "10"
cache: latest
proxy: latest
api: 0.18.1
web: 0.18.1
publish:
domain: org.enp.one
http: 8087
settings:
database_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
35313866386161376430383232343834633566363136323761316531663633383231653135313565
6332336461356164623237306436393131383566656233640a316262616161336331356565363963
35313430303237313039346162653564623236373564306333393362623134613437656231633635
6334616138663036610a646234366264646363353635356338633035373166343763353733336339
38663937383165386530326138363965626666386366636330343133633238636236316432613136
6662313533316563646461646336396430306466323831613730
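
The compose templates earlier in this changeset reference a bare `app` variable (`app.networks.ext`, `app.ports`, `app.versions`, `app.publish`, `app.settings`), whose shape matches a single entry of the `skylab_services` map above. As a rough sketch only (the task name, template paths, and loop are assumptions, not taken from this diff), the deployment play presumably hands one service definition at a time to its template along these lines:

```yaml
# Hypothetical wiring: render one compose file per service, passing the
# matching skylab_services entry to the template as `app`
- name: Render compose definitions
  vars:
    app: "{{ item.value }}"
  ansible.builtin.template:
    src: "compose/{{ item.key }}.yaml.j2"   # e.g. gitea.yaml.j2, plex.yaml.j2 (names assumed)
    dest: "{{ skylab_state_dir }}/compose/{{ item.key }}.yaml"
    mode: 0640
  loop: "{{ skylab_services | dict2items }}"
  loop_control:
    label: "{{ item.key }}"
```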

View File

@@ -0,0 +1,9 @@
---
skylab_ssh_keys:
enpaul:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDi9rWRC15og/+3Jc3AMHOrlIc2BHaAX9PLbklz3qfFtSOp9bIScMcH5ZR6lyVSgP8RCEjI5HuZejDUJTXUQYfvEJeno//vmxn9Vw66lpMz/FSJ3JcDbjDVI1pe3A8mTOAS+AoVOEzCUOJVZJvdI272Hgf+QRBu+ONQ12u+2XYdVfLFDe7mAG+vEJRBatwb8B7Al+/LUpIrCuPm9PzMBtCMFjWGaqQgnyJYRSPIGxz9231XIjwhHLOQG1R0jLGuS37X+J49Y5JYDaHf9q9KH76GjdO2rOq6aGvwN93Y4Z+D2hMOklhD0Ez/ZE+I3ZUPV0e5pF28gsA6L7gTeqmSGpQaKdwjCUoU12VM70OVxng5p2+7DIc0k2np7rnvd4zybgn9OMM+TIO5M3c6ocDuNsEmRgfS3V99X5oh9qNy35UdBXV08j0wFoUo1KcyGwyNBYzKzvkkvtgJezVKmqSPKeBjMgMX4UsJsMn27Zosk0ZgoUwLFPO9Pg7uShncwgsTnvYDR1ws53PV832gc7A85ud/dC9Fjn6jBsMQaCFbiZktc5J8mv3cugQHQesbq8Y2aNVRu+ECb+KUvAEdPacWdBOkk0IvZ4PvLrAs2xehF6FYVqKVtPlJMaUAAwj9vVx7Nl2HnsSRIrCgxsMOTOhbbp/3NrvM8r6K7zKBzXg2TNgQeQ== enpaul@ph-1
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8K74AXz6LBYfoXRbY1pKP+1rvi5BpjFOkg1uYuMM+q78toagSREpLrbjvPtoL8fkEFox4T9TUaME08ZP8560R6iRk35extTTiXquflEwvpsF+e9Lv8E712s/1ydJpkYoux1OohE4W5D9DjVMEW1tjXeb+9aDUcVml6YKMpKLpjEIVanyjHMN13XgswKZGoK3mVMnWxE36fbGVVfiFCvMr/BpjqGShRCxmvldzgq76i1BpTKi6omOjvpgRQcUJcDhYcHAUVSlNccgGLmlAPiUillA//pk84yczzH1dQBB6571Ab5ldoUDBU/hJ0W27aeOfrepup4hNuUt2oux+zAn+uetAuAWKU2Kd0Oo6L5IKObbAQLI0CXfyrmHlrYXwTyNMFleeOdw7s9lf2ra3YCYVXfMALdE6pp+HJLBxzg9kMBbTp6zkV7ZKi75AQhyBJA0s4+vRUccBtJit3Tls+aw/3rd9Dt9lLaXkE80khoKsUI0eiuXtPDlurGxkpcTe6Al/lChNA19pdKEUsBmhD6UUvMTYWlApmta/+xf0wDsSyHqJcnIGx8Qdtg3c4j1Ch+WXwbdOwk8XJrL0atWmv2ium1ll/arO2NFBmbwG1LG/lzJ1k/DoAiHrKrb1HdlwDk0O/7xF/zyC2mfVZKO36+g4XlA7wDJc0tB5vIymlEy2rw== enpaul@discovery
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ3ron1rnTp4t9iyB2VGY5jNuOuJcOgZD3KewjPqOijA enpaul@voyager
ansible:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP5TGKururOa1Y+cbv8AWXYI5zhfZCDV0fsBG+33IYUc enpaul@ansible.voyager
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDN/E/zjwCruZf+mboBeBAtNFBInWMvdm6TjwpIg6jVoGgRmYQxEfIcqDSfBTQQ0gbyE3udTdTUDRLRu7ADktzL9J0IepEaYARfz8SANS/Hx2MR0NNy8DArrSFJCOillDA7E7UmNPIPeQl0O76p2ZjEDy6qZnTiW8eOtD7LCzJp4eGJanUPFhag8f4aSNbmiHGR25Zuk82w2/+KrqiI3gO0+jNlnPBf+XHNnFbtUIroupRfxgLdk1OahmkWHTSHkDtXiYrWIISarrHCgVqHTHo1KIX5+MPOH4S5VLB1kaY/O7+g/XlFrAciw8m0zjyBq0ILb+YTSrL9PYnSBtnHAVGJv2bB+TgCfF/nhQGqoqBqqQHFnX0y3JygmDTJMO+aE5wlvI5Laki7EHYPU4fL+Ge76l/dG9j2anw4/iHklbfk1UOxnLvJl593GAlILg1Kd8xx9VfYzVZ7GZym2zq3NI4uQ77T1H4iGoE67zarkn3peKacjX/KARq4weVvs3irHIHibnIuh/TGcS4eiQoNdPxsSA2wRKB6jeuXiV65F1rUDNGs80wcJmsAbZN8/u9Tt0o/Xc+L/LVhV0yrSeBUxzXtlaS+RfcteBXByO3xfC112Cj5grKVki5xWN9AY42Y6JhT3OyiO33dKUMEF/KfiEWWAfvQr/t1CI/rdcEbv3pyUw== enpaul@ansible.discovery

File diff suppressed because it is too large

View File

@@ -0,0 +1,9 @@
[grafana]
name=grafana
baseurl=https://packages.grafana.com/enterprise/rpm
repo_gpgcheck=1
enabled=1
gpgcheck=1
gpgkey=https://packages.grafana.com/gpg.key
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt

View File

@@ -0,0 +1,22 @@
# Ansible managed file - DO NOT EDIT
#
# https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-16-04
#
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 valid=300s;
resolver_timeout 5s;
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
ssl_dhparam /etc/nginx/ssl-dhparam.pem;
# EOF

View File

@@ -0,0 +1,12 @@
---
- name: restart-nginx
become: true
ansible.builtin.systemd:
name: nginx
state: restarted
- name: restart-grafana
become: true
ansible.builtin.systemd:
name: grafana-server
state: restarted

View File

@@ -0,0 +1,48 @@
---
- name: Install Grafana Enterprise repository
become: true
ansible.builtin.copy:
src: grafana.repo
dest: /etc/yum.repos.d/grafana.repo
owner: root
group: "{{ ansible_user }}"
mode: 0644
register: _grafana_repo
- name: Install Grafana repository GPG key
become: true
ansible.builtin.rpm_key:
state: present
key: https://packages.grafana.com/gpg.key
- name: Install Grafana
become: true
ansible.builtin.dnf:
name: grafana
state: present
update_cache: "{{ _grafana_repo.changed }}"
- name: Enable and start Grafana
become: true
ansible.builtin.systemd:
name: grafana-server
state: started
enabled: true
- name: Fetch installed grafana plugins
become: true
ansible.builtin.command:
cmd: grafana-cli plugins ls
changed_when: false
register: _grafana_plugins_raw
- name: Install plugins
become: true
ansible.builtin.command:
cmd: grafana-cli plugins install {{ item }}
changed_when: item not in _grafana_plugins_raw.stdout
notify: [restart-grafana]
loop:
- marcusolsson-json-datasource
- grafana-clock-panel
- ayoungprogrammer-finance-datasource

View File

@@ -0,0 +1,6 @@
---
- name: Install and configure Grafana
ansible.builtin.import_tasks: grafana.yaml
- name: Install and configure Nginx
ansible.builtin.import_tasks: nginx.yaml

View File

@@ -0,0 +1,107 @@
---
- name: Install nginx
become: true
ansible.builtin.dnf:
name: nginx
state: present
- name: Enable and start nginx
become: true
ansible.builtin.systemd:
name: nginx
state: started
enabled: true
- name: Configure firewall for Nginx
become: true
ansible.posix.firewalld:
service: "{{ item }}"
state: enabled
zone: internal
permanent: true
immediate: true
loop:
- http
- https
- name: Configure SELinux for Nginx
when: ansible_selinux.status | default("") == "enabled"
become: true
ansible.posix.seboolean:
name: httpd_can_network_connect
state: true
persistent: true
notify: [restart-nginx]
- name: Create certificate directory
become: true
ansible.builtin.file:
path: "{{ dashboard_certificate_directory }}"
state: directory
owner: nginx
group: "{{ ansible_user }}"
mode: 0570
- name: Generate X509 private key
become: true
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.crypto.openssl_privatekey:
path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
type: RSA
size: 8192
passphrase: "{{ dashboard_certificate_password }}"
cipher: auto
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
- name: Install private key password file
become: true
ansible.builtin.copy:
content: "{{ dashboard_certificate_password }}"
dest: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password"
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
- name: Create self-signed certificate
become: true
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.crypto.x509_certificate:
path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem"
privatekey_path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
privatekey_passphrase: "{{ dashboard_certificate_password }}"
provider: selfsigned
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
notify: [restart-nginx]
- name: Copy nginx SSL parameters
become: true
ansible.builtin.copy:
src: ssl-options.conf
dest: /etc/nginx/ssl-options.conf
owner: nginx
group: "{{ ansible_user }}"
mode: 0664
notify: [restart-nginx]
- name: Export Diffie-Hellman parameters
become: true
ansible.builtin.command:
cmd: openssl dhparam -out /etc/nginx/ssl-dhparam.pem 2048
creates: /etc/nginx/ssl-dhparam.pem
notify: [restart-nginx]
- name: Configure nginx server
become: true
ansible.builtin.template:
src: nginx.conf.j2
dest: /etc/nginx/conf.d/{{ dashboard_hostname }}.conf
owner: nginx
group: "{{ ansible_user }}"
mode: 0444
notify: [restart-nginx]

View File

@@ -0,0 +1,29 @@
# Ansible managed file - DO NOT MANUALLY EDIT
#
server {
server_name {{ dashboard_hostname }};
root /usr/share/nginx/html;
location / {
proxy_pass http://127.0.0.1:3000/;
proxy_set_header Host $host;
}
listen 443 ssl http2;
ssl_certificate {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem;
ssl_certificate_key {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key;
ssl_password_file {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password;
include /etc/nginx/ssl-options.conf;
}
server {
if ($host = {{ dashboard_hostname }}) {
return 301 https://$host$request_uri;
}
server_name {{ dashboard_hostname }};
listen 80;
return 404;
}
#
# EOF

View File

@@ -0,0 +1,15 @@
---
dashboard_certificate_directory: /etc/nginx/certs
dashboard_certificate_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
62373265623036656632396637363539313437656433656461356561393538333536303961363462
3964353831633165363430313533623563343732623930630a393030393336613563313431306233
62393235303234336365313138633137663430653061343737616466303136616130643061356566
3165313038393163340a396365643335343332333335363539326635633466313264373639353930
36646462396139346432353233646635303031613639323266366235373132346363653431323666
38336365303431646530613030613437663035613332653865366432636238303437323633666239
64366435353762656362666531393865383639343461616365316634326334623733653664666161
63366234646466326531363666633966326462373562313839393731633931383762306663396562
65663031653661333439373461333234613863623364643464323863656630386561316565353232
35313338373631356231376361346662353365373030653965626434336339613936656138656637
666430306334623563306236616663623438

View File

@@ -0,0 +1,67 @@
---
argument_specs:
main:
description: >-
This role makes several assumptions about the local storage configuration of the server:
1. There is one block device on the server that will be used for data storage
2. That block device will be joined to a glusterfs volume
3. The block device is encrypted with LUKS
This role mostly serves to perform housekeeping tasks and validation of expected configs.
Automating disk configuration seems like a really good way to lose all my data, so I decided
to leave that to the much more reliable manual configuration for the time being.
To that end, here is a quick cheatsheet of commands that might be useful in setting up
storage device(s) for this role (replace `DEVICE` with the block device for storage):
```bash
# Encrypt a block device, provide encryption key when prompted
cryptsetup luksFormat --type luks2 /dev/DEVICE
# Unlock encrypted block device and mount under a mapper
cryptsetup luksOpen /dev/DEVICE LABEL
# Lock an encrypted block device
cryptsetup luksClose LABEL
# Create an XFS filesystem on the unlocked device
mkfs.xfs /dev/mapper/LABEL -L LABEL
# Run from an existing server already in the gluster pool
# Add server to the gluster pool
gluster peer probe HOSTNAME
# To replace a brick from an already offline'd node, the old brick first needs to be force
# removed, replication reduced, and (if arbiter is enabled) any arbiter nodes removed
#
# Remove arbiter brick
gluster volume remove-brick VOLUME replica 2 HOSTNAME:/EXPORT force
# Remove dead data brick
gluster volume remove-brick VOLUME replica 1 HOSTNAME:/EXPORT force
# Remove dead node
gluster peer detach HOSTNAME
# Add new data brick
gluster volume add-brick VOLUME replica 2 HOSTNAME:/EXPORT start
#
# To re-add the arbiter you might need to clean up the `.glusterfs` directory and remove
# the gluster extended attributes from the old brick. These next commands need to be run on the host
# with the arbiter brick physically attached
#
rm -rf /EXPORT/.glusterfs
setfattr -x trusted.gfid /EXPORT
setfattr -x trusted.glusterfs.volume-id /EXPORT
# Re-add arbiter brick
gluster volume add-brick VOLUME replica 3 arbiter 1 HOSTNAME:/EXPORT
# Trigger a resync
gluster volume heal datastore
# General gluster debug info
gluster volume info VOLUME
gluster volume status VOLUME
```
options:
skylab_datastore_device:
description: The block device under `/dev/` that should be configured as datastore storage
type: str
required: true
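
Since `skylab_datastore_device` is the only required option here, satisfying the argument spec is a one-line host_vars entry; the device name below is illustrative, not taken from the inventory:

```yaml
# Hypothetical host_vars for a datastore host; the role consumes this as /dev/<device>
skylab_datastore_device: sdb
```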

View File

@@ -0,0 +1,52 @@
---
- name: Allow gluster through firewall
become: true
ansible.posix.firewalld:
service: glusterfs
state: enabled
zone: trusted
immediate: true
permanent: true
- name: Create datastore directory
become: true
ansible.builtin.file:
path: /mnt/brick/datastore
state: directory
- name: Start and disable glusterd
become: true
ansible.builtin.systemd:
name: glusterd
state: started
enabled: false
- name: Fetch peer status
become: true
ansible.builtin.command:
cmd: gluster peer status
changed_when: false
register: _gluster_peer_status_raw
- name: Check peer status
ansible.builtin.assert:
that:
- not _gluster_peer_status_raw.stdout_lines[0].strip().endswith('0')
fail_msg: >-
ERROR: Datastore host '{{ inventory_hostname }}' is not joined to the gluster pool. Run the
command 'gluster peer probe {{ inventory_hostname }}.local' from another datastore host to
add it.
success_msg: >-
Datastore host {{ inventory_hostname }} is joined to the gluster pool
- name: Mount gluster volume
become: true
ansible.posix.mount:
path: /mnt/datastore
src: localhost:/datastore
state: mounted
fstype: glusterfs
# Note that this just needs to be any path *other* than the actual
# fstab. This is done just to prevent the devices from being
# automatically mounted at boot
fstab: "{{ skylab_state_dir }}/mounts"

View File

@@ -0,0 +1,9 @@
---
- name: Install datastore packages
ansible.builtin.import_tasks: packages.yaml
- name: Configure mounting
ansible.builtin.import_tasks: mounts.yaml
- name: Configure glusterfs
ansible.builtin.import_tasks: gluster.yaml

View File

@@ -0,0 +1,109 @@
---
- name: Create mount points
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
owner: root
group: "{{ ansible_user }}"
loop:
- /mnt/datastore
- /mnt/brick
- name: Determine current mounts
vars:
_current_mounts: []
ansible.builtin.set_fact:
_current_mounts: "{{ _current_mounts + [item.mount] }}"
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.mount }}"
- name: Ensure mount points are empty when unmounted
when: item not in _current_mounts
ansible.builtin.command:
cmd: "/usr/bin/ls {{ item }}"
changed_when: false
failed_when: _mountpoint_ls_raw.stdout
register: _mountpoint_ls_raw
loop:
- /mnt/datastore
- /mnt/brick
- name: Fetch block device information
ansible.builtin.command:
cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
changed_when: false
register: _lsblk_info_raw
- name: Process block device information
ansible.builtin.set_fact:
_datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"
- name: Check state of the datastore device
ansible.builtin.assert:
that: _datastore_device_info.fstype == "crypto_LUKS"
fail_msg: >-
ERROR: Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }}
must be LUKS encrypted
success_msg: >-
Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is
LUKS encrypted
- name: Determine whether datastore block is decrypted
ansible.builtin.set_fact:
_datastore_device_is_decrypted: "{{ _datastore_device_info.children is defined }}"
- name: Decrypt datastore block
when: not _datastore_device_is_decrypted
block:
- name: Prompt for decryption key
no_log: true
when: skylab_datastore_encryption_password is not defined
ansible.builtin.pause:
prompt: >-
Datastore device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is not
decrypted. Enter decryption passphrase to continue GlusterFS brick configuration
echo: false
register: _luks_decryption_key
- name: Open LUKS device
become: true
community.crypto.luks_device:
device: /dev/{{ skylab_datastore_device }}
state: opened
name: brick
passphrase: "{{ _luks_decryption_key.user_input | default(skylab_datastore_encryption_password) }}"
- name: Fetch updated block device information
ansible.builtin.command:
cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
changed_when: false
register: _lsblk_info_raw
- name: Process updated block device information
ansible.builtin.set_fact:
_datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"
- name: Create dummy fstab
ansible.builtin.file:
state: touch
path: "{{ skylab_state_dir }}/mounts"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0644
access_time: preserve
modification_time: preserve
- name: Mount datastore block
become: true
ansible.posix.mount:
path: /mnt/brick
src: UUID={{ _datastore_device_info.children[0].uuid }}
state: mounted
fstype: "{{ _datastore_device_info.children[0].fstype }}"
# Note that this just needs to be any path *other* than the actual
# fstab. This is done just to prevent the devices from being
# automatically mounted at boot
fstab: "{{ skylab_state_dir }}/mounts"

View File

@@ -0,0 +1,31 @@
---
- name: Install gluster repository
become: true
ansible.builtin.dnf:
name: centos-release-gluster9
state: present
register: _datastore_repo_gluster
- name: Enable required repositories
become: true
ansible.builtin.lineinfile:
path: /etc/yum.repos.d/{{ item }}.repo
line: enabled=1
state: present
regexp: "#?enabled=(0|1)"
loop:
- Rocky-AppStream
- Rocky-PowerTools
register: _datastore_repo_powertools
- name: Install datastore packages
become: true
when: ansible_distribution == "Rocky"
ansible.builtin.dnf:
state: present
update_cache: "{{ _datastore_repo_powertools.changed or _datastore_repo_gluster.changed }}"
name:
- cryptsetup-luks
- glusterfs
- glusterfs-fuse
- glusterfs-server

View File

@@ -0,0 +1,6 @@
---
- name: restart-sshd
become: true
ansible.builtin.systemd:
name: sshd
state: restarted

View File

@@ -0,0 +1,20 @@
---
- name: Enable systemd-firewalld
become: true
ansible.builtin.systemd:
name: firewalld
state: started
enabled: true
- name: Configure firewall interface zones
become: true
when: item.value.firewall is defined
ansible.posix.firewalld:
interface: "{{ item.key }}"
zone: "{{ item.value.firewall }}"
state: enabled
permanent: true
immediate: true
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"

View File

@@ -0,0 +1,32 @@
---
- name: Retrieve current hostsfile contents
ansible.builtin.command:
cmd: cat /etc/hosts
changed_when: false
register: _existing_hostsfile_raw
- name: Assemble hostsfile lines
vars:
_hostsfile_lines: []
ansible.builtin.set_fact:
_hostsfile_lines: "{{ _hostsfile_lines + [hostvars[item].skylab_cluster.address.internal | ansible.netcommon.ipaddr('address') + ' ' + item + '.local ' + hostvars[item].skylab_legacy_names | default([]) | join(' ')] }}"
loop: "{{ groups.cluster }}"
- name: Configure local hostsfile
become: true
ansible.builtin.lineinfile:
path: /etc/hosts
line: "{{ item }}"
state: present
loop: "{{ _hostsfile_lines }}"
loop_control:
label: "{{ item.partition(' ')[0] }}"
- name: Remove unmanaged hostsfile entries
become: true
when: "'localhost' not in item and item not in _hostsfile_lines"
ansible.builtin.lineinfile:
path: /etc/hosts
line: "{{ item }}"
state: absent
loop: "{{ _existing_hostsfile_raw.stdout_lines }}"

View File

@@ -0,0 +1,24 @@
---
- name: Configure sudoers file
ansible.builtin.import_tasks: sudoers.yaml
- name: Configure SSH server
ansible.builtin.import_tasks: sshd.yaml
- name: Configure network settings
when: skylab_networking is defined
ansible.builtin.include_tasks: networkd.yaml
- name: Configure firewall settings
when: skylab_networking is defined
ansible.builtin.include_tasks: firewalld.yaml
- name: Configure hostsfile
when: "inventory_hostname in groups.cluster"
ansible.builtin.import_tasks: hosts.yaml
- name: Enable tmpfs mount
become: true
ansible.builtin.systemd:
name: tmp.mount
enabled: true

View File

@@ -0,0 +1,97 @@
---
- name: Configure network settings
become: true
block:
- name: Install systemd-networkd on Rocky
ansible.builtin.dnf:
name: systemd-networkd
state: present
- name: Ensure network config directory exists
ansible.builtin.file:
path: /etc/systemd/network
state: directory
owner: root
group: root
mode: 0755
- name: Create network files
ansible.builtin.template:
src: network.j2
dest: /etc/systemd/network/{{ item.key }}.network
mode: 0644
owner: root
group: "{{ ansible_user }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create netdev files
when: item.value.device is defined
ansible.builtin.template:
src: netdev.j2
dest: /etc/systemd/network/{{ item.key }}.netdev
mode: 0644
owner: root
group: "{{ ansible_user }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Fetch existing network config directory contents
changed_when: false
ansible.builtin.command:
cmd: /usr/bin/ls /etc/systemd/network
register: _network_config_dir_raw
- name: Remove legacy network config files
when: item.strip().replace('.netdev', '').replace('.network', '') not in skylab_networking
ansible.builtin.file:
path: /etc/systemd/network/{{ item }}
state: absent
loop: "{{ _network_config_dir_raw.stdout_lines }}"
loop_control:
label: "{{ item.strip() }}"
- name: Configure fallback DNS
ansible.builtin.lineinfile:
path: /etc/systemd/resolved.conf
create: false
line: FallbackDNS=
- name: Enable systemd-networkd
ansible.builtin.systemd:
name: "{{ item }}"
enabled: true
loop:
- systemd-networkd
- systemd-networkd-wait-online
- systemd-resolved
- name: Disable NetworkManager
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
loop:
- NetworkManager
- NetworkManager-wait-online
- name: Start systemd-resolved to enable symlink creation
ansible.builtin.systemd:
name: systemd-resolved
state: started
- name: Link system resolv.conf to systemd-resolved
ansible.builtin.file:
dest: /etc/resolv.conf
src: /run/systemd/resolve/resolv.conf
state: link
force: true
setype: net_conf_t
- name: Link systemd-resolved to multi-user target
ansible.builtin.file:
dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
src: /usr/lib/systemd/system/systemd-resolved.service
state: link
force: true

View File

@@ -0,0 +1,40 @@
---
- name: Configure SSH authentication settings
become: true
ansible.builtin.replace:
path: /etc/ssh/sshd_config
regexp: "{{ item.regex }}"
replace: "{{ item.value }}"
notify: [restart-sshd]
loop:
- regex: "^.*PermitRootLogin (yes|no).*$"
value: PermitRootLogin no
- regex: "^.*PasswordAuthentication (yes|no).*$"
value: PasswordAuthentication no
- regex: "^.*ChallengeResponseAuthentication (yes|no).*$"
value: ChallengeResponseAuthentication no
- regex: "^.*GSSAPIAuthentication (yes|no).*$"
value: GSSAPIAuthentication no
loop_control:
label: "{{ item.value }}"
- name: Disable dynamic MOTD on debian systems
when: ansible_os_family == "Debian"
ansible.builtin.replace:
path: /etc/pam.d/sshd
regexp: "^session optional pam_motd.so motd=/run/motd.dynamic"
replace: "#session optional pam_motd.so motd=/run/motd.dynamic"
- name: Disable Cockpit activation message on Rocky
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.file:
path: /etc/motd.d/cockpit
state: absent
- name: Copy MOTD to remote
become: true
ansible.builtin.template:
src: motd.j2
dest: /etc/motd
mode: 0644

View File

@@ -0,0 +1,30 @@
---
- name: Disable sudo password for WHEEL group
when: ansible_distribution == "Rocky" or ansible_distribution == "CentOS"
become: true
ansible.builtin.copy:
content: "%wheel ALL=(ALL) NOPASSWD: ALL"
dest: /etc/sudoers.d/30-wheel
owner: root
group: "{{ ansible_user }}"
mode: 0644
# Note that the cleanup tasks need to run after the new installation tasks,
# since one or more of the files being cleaned up may currently be relied on
# to grant ansible access
- name: Fetch content of sudoers config directory
become: true
changed_when: false
ansible.builtin.command:
cmd: /usr/bin/ls /etc/sudoers.d/
register: _sudoers_files_raw
- name: Remove legacy sudoers config files
when: item.strip() not in ["30-wheel"]
become: true
ansible.builtin.file:
path: /etc/sudoers.d/{{ item.strip() }}
state: absent
loop: "{{ _sudoers_files_raw.stdout_lines }}"
loop_control:
label: "/etc/sudoers.d/{{ item.strip() }}"

View File

@@ -0,0 +1,11 @@
[ASCII-art banner (alignment lost in extraction)]
✨ {{ skylab_description }} @{{ skylab_location }}
{{ ' ' }}

View File

@@ -0,0 +1,18 @@
# ANSIBLE MANAGED FILE - DO NOT MANUALLY EDIT
#
[NetDev]
Name={{ item.key }}
Kind={{ item.value.device }}
{% if item.value.device.lower() == 'bond' %}
[Bond]
Mode={{ item.value.bond_mode | default('balance-rr') }}
PrimaryReselectPolicy=always
MIIMonitorSec=1s
{% endif %}
{% if item.value.device.lower() == 'vlan' %}
[VLAN]
Id={{ item.key.partition('.')[2] }}
{% endif %}
# EOF

View File

@@ -0,0 +1,32 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
#
[Match]
Name={{ item.key }}
[Network]
DHCP={{ "Yes" if item.value.dhcp | default(false) else "No" }}
IPv6AcceptRA=No
{% if item.value.dns is defined %}
{% for server in item.value.dns %}
DNS={{ server }}
{% endfor %}
{% endif %}
{% if item.value.bond is defined %}
Bond={{ item.value.bond }}
{% endif %}
{% if not item.value.dhcp | default(false) %}
{% if item.value.gateway is defined %}
Gateway={{ item.value.gateway | ansible.netcommon.ipaddr('address') }}
{% endif %}
{% for address in item.value.addresses | default([]) %}
Address={{ address | ansible.netcommon.ipaddr('host/prefix') }}
{% endfor %}
{% endif %}
{% for interface in skylab_networking.keys() %}
{% if interface.startswith(item.key) and interface.partition('.')[2] | regex_search('^[0-9]{1,4}$') and interface != item.key %}
VLAN={{ interface }}
{% endif %}
{% endfor %}
# EOF
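
Both the netdev and network templates iterate over `skylab_networking | dict2items`, so each key is an interface name and each value carries the fields read above (`device`, `bond_mode`, `dhcp`, `dns`, `bond`, `gateway`, `addresses`) plus the `firewall` zone consumed by firewalld.yaml earlier. A hypothetical entry, with invented interface names and addresses, might look like:

```yaml
# Illustrative skylab_networking structure (names and addresses are examples only)
skylab_networking:
  eno1:
    bond: bond0              # enslaved interface, no addresses of its own
  bond0:
    device: bond
    bond_mode: active-backup
    firewall: internal
    dns:
      - 10.42.100.1
    gateway: 10.42.100.1
    addresses:
      - 10.42.100.10/24
  bond0.101:                 # VLAN 101 on the bond; the netdev Id is parsed from the name
    device: vlan
    addresses:
      - 10.42.101.10/24
```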

View File

@@ -0,0 +1,69 @@
---
- name: Check cluster swarm status
run_once: true
block:
- name: Fetch cluster server swarm info
delegate_to: "{{ item }}"
ansible.builtin.command:
cmd: !unsafe docker info --format '{{json .Swarm}}'
changed_when: false
register: _docker_cluster_swarm_state_raw
loop: "{{ groups.cluster }}"
- name: Process cluster server swarm info
vars:
_docker_cluster_swarm_state: {}
ansible.builtin.set_fact:
_docker_cluster_swarm_state: "{{ _docker_cluster_swarm_state | combine({item.item: (item.stdout | from_json)}) }}"
loop: "{{ _docker_cluster_swarm_state_raw.results }}"
loop_control:
label: "{{ item.item }}"
- name: Identify swarm managers
vars:
_docker_cluster_swarm_managers: []
when: item.value.LocalNodeState == 'active' and item.value.ControlAvailable
ansible.builtin.set_fact:
_docker_cluster_swarm_managers: "{{ _docker_cluster_swarm_managers + [item.key] }}"
loop: "{{ _docker_cluster_swarm_state | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Check that swarm managers were discovered
ansible.builtin.assert:
that:
- _docker_cluster_swarm_managers
fail_msg: >-
ERROR: None of the member cluster servers ({{ groups.cluster | join(', ') }}) is an active
member of a docker swarm with manager privileges. Please join at least one cluster server to a
swarm and promote it to swarm manager
success_msg: >-
Identified {{ _docker_cluster_swarm_managers | count }} swarm managers
({{ _docker_cluster_swarm_managers | join(', ') }})
- name: Determine swarm manager cluster IDs
vars:
_docker_cluster_swarm_manager_cluster_ids: []
ansible.builtin.set_fact:
_docker_cluster_swarm_manager_cluster_ids: "{{ _docker_cluster_swarm_manager_cluster_ids + [_docker_cluster_swarm_state[item].Cluster.ID] }}"
loop: "{{ _docker_cluster_swarm_managers }}"
- name: Check swarm managers are part of the same swarm
ansible.builtin.assert:
that:
- _docker_cluster_swarm_manager_cluster_ids | unique | count == 1
fail_msg: >-
ERROR: Swarm managers ({{ _docker_cluster_swarm_managers | join(', ') }}) appear to be
joined to different swarms
(IDs {{ _docker_cluster_swarm_manager_cluster_ids | join(', ') }})
success_msg: >-
Swarm managers are joined to swarm with ID
{{ _docker_cluster_swarm_manager_cluster_ids[0] }}
- name: Determine swarm manager to use for host configuration
ansible.builtin.set_fact:
_docker_swarm_manager: "{{ _docker_cluster_swarm_managers[0] }}"
- name: Determine whether host needs to be added to the swarm
ansible.builtin.set_fact:
_docker_swarm_needs_join: "{{ not _docker_cluster_swarm_state[inventory_hostname].Cluster.ID | default('') == _docker_cluster_swarm_manager_cluster_ids[0] }}"
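
For orientation, the fact assembled above keys each cluster hostname to the `.Swarm` block of `docker info`; the fields the assertions read look roughly like the sketch below (hostname and ID values are invented, and `Cluster` is absent on nodes that are not active swarm members, hence the `default('')` guard):

```yaml
# Illustrative _docker_cluster_swarm_state shape after the processing task
_docker_cluster_swarm_state:
  pegasus.skylab.enp.one:
    LocalNodeState: active
    ControlAvailable: true
    Cluster:
      ID: abc123examplecluster
```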

View File

@@ -0,0 +1,53 @@
---
- name: Determine docker daemon DNS servers
vars:
_docker_daemon_dns: []
ansible.builtin.set_fact:
_docker_daemon_dns: "{{ _docker_daemon_dns + (item.value.dns | default([])) }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create docker config directory
become: true
ansible.builtin.file:
path: /etc/docker
state: directory
owner: "{{ ansible_user }}"
group: docker
mode: 0750
- name: Configure docker daemon
become: true
ansible.builtin.template:
src: daemon.json.j2
dest: /etc/docker/daemon.json
mode: 0640
owner: "{{ ansible_user }}"
group: docker
- name: Start and enable docker service
become: true
ansible.builtin.systemd:
name: docker
state: started
enabled: true
- name: Include access variables
ansible.builtin.include_vars:
file: vars/access.yaml
- name: Add administrators to docker group
become: true
when: item.admin | default(false) and 'cluster' in (item.targets | default([]))
ansible.builtin.user:
name: "{{ item.name }}"
group: "{{ item.name }}"
groups: docker
append: true
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.name }},{{ item.uid }}"
- name: Reset connection to get new group membership
ansible.builtin.meta: reset_connection

View File

@@ -0,0 +1,61 @@
---
- name: Fetch glusterfs plugin state
block:
- name: Fetch glusterfs storage plugin state
ansible.builtin.command:
cmd: docker plugin inspect glusterfs
changed_when: false
register: _docker_glusterfs_storage_plugin_raw
rescue:
- name: Install glusterfs storage plugin
ansible.builtin.command:
cmd: docker plugin install --alias glusterfs mochoa/glusterfs-volume-plugin --grant-all-permissions --disable
changed_when: true
- name: Fetch glusterfs storage plugin state
ansible.builtin.command:
cmd: docker plugin inspect glusterfs
changed_when: false
register: _docker_glusterfs_storage_plugin_raw
- name: Process glusterfs storage plugin config
ansible.builtin.set_fact:
_docker_glusterfs_storage_plugin: "{{ (_docker_glusterfs_storage_plugin_raw.stdout | from_json)[0] }}"
# Note that this might not end up being defined if the plugin has not been configured
- name: Identify plugin server settings
when: item.startswith('SERVERS')
ansible.builtin.set_fact:
_docker_glusterfs_existing_setting: "{{ item }}"
loop: "{{ _docker_glusterfs_storage_plugin.Settings.Env }}"
- name: Determine gluster servers
run_once: true
vars:
_docker_glusterfs_hostnames: []
ansible.builtin.set_fact:
_docker_glusterfs_hostnames: "{{ _docker_glusterfs_hostnames + [item + '.local'] }}"
loop: "{{ groups.cluster }}"
- name: Determine gluster plugin setting
ansible.builtin.set_fact:
_docker_glusterfs_setting: "SERVERS={{ _docker_glusterfs_hostnames | join(',') }}"
- name: Configure plugin
when: _docker_glusterfs_setting != _docker_glusterfs_existing_setting | default('')
block:
- name: Disable plugin
when: _docker_glusterfs_storage_plugin.Enabled
ansible.builtin.command:
cmd: docker plugin disable glusterfs
- name: Set plugin servers setting
changed_when: true
ansible.builtin.command:
cmd: docker plugin set glusterfs {{ _docker_glusterfs_setting }}
register: _docker_glusterfs_set_setting
- name: Enable plugin
when: not _docker_glusterfs_storage_plugin.Enabled or _docker_glusterfs_set_setting.changed | default(false)
ansible.builtin.command:
cmd: docker plugin enable glusterfs

View File

@@ -0,0 +1,26 @@
---
- name: Install Docker repository
become: true
ansible.builtin.get_url:
url: https://download.docker.com/linux/centos/docker-ce.repo
dest: /etc/yum.repos.d/docker-ce.repo
owner: root
group: "{{ ansible_user }}"
mode: 0644
register: _docker_repo_status
- name: Install docker repository GPG key
become: true
ansible.builtin.rpm_key:
key: https://download.docker.com/linux/centos/gpg
state: present
- name: Install Docker
become: true
ansible.builtin.dnf:
state: present
name:
- docker-ce
- docker-ce-cli
- containerd.io
update_cache: "{{ _docker_repo_status.changed }}"

View File

@@ -0,0 +1,48 @@
---
- name: Fetch join token from existing manager
delegate_to: "{{ _docker_swarm_manager }}"
changed_when: false
ansible.builtin.command:
cmd: docker swarm join-token manager --quiet
register: _docker_swarm_join_token
- name: Fetch manager addresses from existing manager
delegate_to: "{{ _docker_swarm_manager }}"
changed_when: false
ansible.builtin.command:
cmd: !unsafe docker info --format '{{json .Swarm.RemoteManagers}}'
register: _docker_swarm_manager_info_raw
- name: Process manager addresses
vars:
_docker_swarm_manager_addresses: []
ansible.builtin.set_fact:
_docker_swarm_manager_addresses: "{{ _docker_swarm_manager_addresses + [item.Addr] }}"
loop: "{{ _docker_swarm_manager_info_raw.stdout | from_json }}"
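# advertise_addr/listen_addr resolve to this host's address on the cluster-internal interface
# by looking up the ansible_<interface> fact named by skylab_cluster.interface.internal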
- name: Join node to swarm
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_swarm:
state: join
advertise_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
listen_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
remote_addrs: "{{ _docker_swarm_manager_addresses }}"
join_token: "{{ _docker_swarm_join_token.stdout.strip() }}"
timeout: 1200
- name: Fetch node swarm ID
ansible.builtin.command:
cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
changed_when: false
register: _docker_node_id_raw
# For newly added nodes we don't want to have services be automatically scheduled on them
# until the configuration is complete. The node-up playbook will be responsible for updating
# the node to make it available in the cluster again
- name: Update node to drain
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_node:
availability: drain
hostname: "{{ _docker_node_id_raw.stdout.strip() }}"

View File

@@ -0,0 +1,21 @@
---
- name: Install Docker
ansible.builtin.import_tasks: install.yaml
- name: Configure Docker
ansible.builtin.import_tasks: configure.yaml
# This taskfile will set two facts that will be used in subsequent tasks:
# * _docker_swarm_needs_join: a boolean indicating whether the host needs to be joined to the swarm
# or is already joined
# * _docker_swarm_manager: the inventory hostname of an existing swarm manager that later tasks
# can delegate to when fetching swarm join info
- name: Check swarm state ahead of swarm configuration
ansible.builtin.import_tasks: check.yaml
- name: Join server to swarm
when: _docker_swarm_needs_join
ansible.builtin.include_tasks: join.yaml
- name: Configure gluster storage driver
ansible.builtin.import_tasks: gluster.yaml

View File

@@ -0,0 +1,7 @@
{
"dns": [
{% for dns_server in _docker_daemon_dns %}
"{{ dns_server }}"{{ ',' if not loop.last else '' }}
{% endfor %}
]
}
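{# Example render with two hypothetical resolvers collected from skylab_networking:
{"dns": ["10.1.1.1", "10.1.1.2"]}
#}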

View File

@@ -0,0 +1,2 @@
[org/gnome/login-screen]
disable-user-list=true

View File

@@ -0,0 +1,2 @@
[org/gnome/mutter]
experimental-features=['scale-monitor-framebuffer']

View File

@@ -0,0 +1,47 @@
if [ -f "$(which powerline-daemon)" ]; then
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
. /usr/share/powerline/bash/powerline.sh
fi
export NVM_DIR="$HOME/.nvm"
export PROJECTS_DIR="$HOME/projects"
function gg() {
cd "$PROJECTS_DIR/$1";
if [ -f "$PROJECTS_DIR/$1/ansible.cfg" ]; then
ANSIBLE_CONFIG="$PROJECTS_DIR/$1/ansible.cfg" ANSIBLE_COLLECTIONS_PATH="$PROJECTS_DIR/$1/.ansible" poetry shell;
elif [ -f "$PROJECTS_DIR/$1/pyproject.toml" ]; then
poetry shell;
fi
}
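# Usage example (hypothetical project name): `gg my-infra` jumps to $PROJECTS_DIR/my-infra and,
# for poetry projects, drops into the project virtualenv with ansible.cfg wired up when present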
mpw() {
_copy() {
if hash pbcopy 2>/dev/null; then
pbcopy
elif hash xclip 2>/dev/null; then
xclip -selection clip
else
cat; echo 2>/dev/null
return
fi
echo >&2 "Copied!"
}
# Empty the clipboard
:| _copy 2>/dev/null
# Ask for the user's name and password if not yet known.
MPW_FULLNAME="Ethan Paul"
# Start Master Password and copy the output.
printf %s "$(MPW_FULLNAME=$MPW_FULLNAME command mpw "$@")" | _copy
}
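# Usage example (hypothetical site name): `mpw example.com` generates the site password and
# copies it to the clipboard via pbcopy/xclip, falling back to printing it when neither exists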
alias explorer='nautilus'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias prun="poetry run"
alias psync="poetry install --remove-untracked"

View File

@@ -0,0 +1,3 @@
user-db:user
system-db:gdm
file-db:/usr/share/gdm/greeter-dconf-defaults

View File

@@ -0,0 +1,2 @@
user-db:user
system-db:local

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 664 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 243 KiB

View File

@@ -0,0 +1,6 @@
---
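# `dconf update` compiles the keyfiles under /etc/dconf/db/*.d/ into the binary databases that
# GDM and user sessions read, so it runs whenever the profile or db files change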
- name: dconf-update
become: true
changed_when: true
ansible.builtin.command:
cmd: dconf update

View File

@@ -0,0 +1,144 @@
---
- name: Install user bashrc
become: true
ansible.builtin.copy:
src: bashrc.sh
dest: ~{{ item }}/.bashrc_ansible
owner: "{{ ansible_user }}"
group: "{{ item }}"
mode: 0644
loop: "{{ _local_human_users }}"
- name: Configure user bashrc loading
become: true
ansible.builtin.lineinfile:
path: ~{{ item }}/.bashrc
line: source ~/.bashrc_ansible
state: present
loop: "{{ _local_human_users }}"
- name: Configure local bash completions loading
become: true
ansible.builtin.lineinfile:
path: ~{{ item }}/.bashrc
line: source ~/.config/bash_completions
state: present
loop: "{{ _local_human_users }}"
- name: Configure bash completions
become: true
ansible.builtin.blockinfile:
path: ~{{ item }}/.config/bash_completions
create: true
block: |
function _gg_completion() {
local cur=${COMP_WORDS[COMP_CWORD]};
COMPREPLY=( $(compgen -W "$(command ls $PROJECTS_DIR)" -- $cur) );
}
complete -F _gg_completion gg
owner: "{{ ansible_user }}"
group: "{{ item }}"
mode: 0664
loop: "{{ _local_human_users }}"
- name: Enforce ownership of the SSH keys
become: true
ansible.builtin.file:
path: ~{{ item.0 }}/.ssh/id_ed25519{{ item.1 }}
state: file
owner: "{{ item.0 }}"
group: "{{ item.0 }}"
loop: "{{ _local_human_users | product(['', '.pub']) }}"
- name: Configure dconf setting
become: true
block:
- name: Create dconf config directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: root
group: "{{ ansible_user }}"
mode: 0755
loop:
- /etc/dconf/profile
- /etc/dconf/db/gdm.d
- /etc/dconf/db/local.d
- name: Create global dconf config
ansible.builtin.copy:
src: gdm-system
dest: /etc/dconf/profile/gdm
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Create user dconf config
ansible.builtin.copy:
src: gdm-user
dest: /etc/dconf/profile/user
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Disable user list
ansible.builtin.copy:
src: 00-disable-user-list
dest: /etc/dconf/db/gdm.d/00-disable-user-list
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Enable fractional scaling
ansible.builtin.copy:
src: 00-enable-fractional-scaling
dest: /etc/dconf/db/local.d/00-enable-fractional-scaling
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Install themes
become: true
block:
- name: Create local themes directory
ansible.builtin.file:
path: ~{{ item }}/.themes
state: directory
owner: "{{ item }}"
group: "{{ item }}"
mode: 0750
loop: "{{ _local_human_users }}"
- name: Unarchive LightningBug into local directory
ansible.builtin.unarchive:
src: lightningbug-dark.tar.gz
dest: ~{{ item }}/.themes
owner: "{{ item }}"
group: "{{ item }}"
loop: "{{ _local_human_users }}"
- name: Install wallpaper
become: true
ansible.builtin.copy:
src: wallpaper-{{ inventory_hostname }}.jpg
dest: ~{{ item }}/Pictures/wallpaper.jpg
owner: "{{ item }}"
group: "{{ item }}"
loop: "{{ _local_human_users }}"
- name: Link external media directory
become: true
ansible.builtin.file:
path: ~{{ item }}/Drives
src: /run/media/{{ item }}
state: link
force: true
loop: "{{ _local_human_users }}"

View File

@@ -0,0 +1,59 @@
---
- name: Check for MPW binary
ansible.builtin.stat:
path: /usr/local/bin/mpw
register: _mpw_binary_stat
- name: Install MPW
when: (not _mpw_binary_stat.stat.exists) or (force_reinstall | default(false))
block:
- name: Install build dependencies on Fedora
when: ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name:
- libsodium-devel
state: present
- name: Create temporary build directory
ansible.builtin.tempfile:
prefix: ansible.build.mpw
state: directory
register: _mpw_build_dir
- name: Download MPW source
ansible.builtin.git:
repo: https://gitlab.com/MasterPassword/MasterPassword.git
version: 344771db
recursive: false # does *not* clone submodules
dest: "{{ _mpw_build_dir.path }}"
# God I hate this
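# The upstream .gitmodules still references submodules over the unauthenticated git:// protocol,
# which is widely disabled nowadays, so rewrite the URLs to HTTPS before initializing them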
- name: Patch .gitmodules to use HTTPS
ansible.builtin.replace:
path: "{{ _mpw_build_dir.path }}/.gitmodules"
regexp: "url = git://"
replace: "url = https://"
- name: Initialize submodules
ansible.builtin.command:
cmd: git submodule update --init
chdir: "{{ _mpw_build_dir.path }}"
- name: Build MasterPassword binary
ansible.builtin.command:
cmd: bash build
chdir: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/"
- name: Copy binary to system path
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/mpw"
dest: "/usr/local/bin"
mode: 0755
always:
- name: Remove temporary directory
ansible.builtin.file:
path: "{{ _mpw_build_dir.path }}"
state: absent

View File

@@ -0,0 +1,79 @@
---
- name: Check whether binary exists
become: true
ansible.builtin.stat:
path: "~{{ local_username }}/.local/bin/MultiMC"
register: _multimc_stat
- name: Install MultiMC
when: (not _multimc_stat.stat.exists) or (force_reinstall | default(false))
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
register: _multimc_tempdir
- name: Download and unpack distribution archive
ansible.builtin.unarchive:
src: https://files.multimc.org/downloads/mmc-stable-lin64.tar.gz
remote_src: true
dest: "{{ _multimc_tempdir.path }}"
- name: Ensure ~/.local/share/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/share
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Ensure ~/.local/bin/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/bin
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Copy MMC distribution to ~/.local/share/
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _multimc_tempdir.path }}/MultiMC/"
dest: "~{{ local_username }}/.local/share/multimc"
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Link MMC binary into ~/.local/bin/
become: true
ansible.builtin.file:
state: link
src: ~{{ local_username }}/.local/share/multimc/MultiMC
path: ~{{ local_username }}/.local/bin/MultiMC
- name: Copy application icon
become: true
ansible.builtin.copy:
src: multimc.png
dest: ~{{ local_username }}/.local/share/icons/multimc.png
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0755
- name: Template application desktop entry
become: true
ansible.builtin.template:
src: multimc.desktop.j2
dest: ~{{ local_username }}/.local/share/applications/multimc.desktop
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0755
always:
- name: Delete temp dir
ansible.builtin.file:
path: "{{ _multimc_tempdir.path }}"
state: absent

View File

@@ -0,0 +1,27 @@
---
- name: Create install directory
become: true
ansible.builtin.file:
path: /opt/pipx
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0755
- name: Create install venv
ansible.builtin.command:
cmd: python3 -m venv /opt/pipx
creates: /opt/pipx/bin/python
- name: Install pipx
ansible.builtin.pip:
name:
- pipx
executable: /opt/pipx/bin/pip
- name: Link pipx binary into system path
become: true
ansible.builtin.file:
state: link
src: /opt/pipx/bin/pipx
path: /usr/local/bin/pipx
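# With the symlink in place any user can run, e.g., `pipx install tox` (hypothetical package)
# to get that CLI tool installed into its own isolated venv under their home directory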

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,53 @@
---
- name: Check whether Tor Browser is already installed
become: true
ansible.builtin.stat:
path: "~{{ local_username }}/.local/share/tor-browser/start-tor-browser.desktop"
register: _torbrowser_stat
- name: Install Tor Browser
when: not _torbrowser_stat.stat.exists
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
register: _torbrowser_tempdir
- name: Download and unpack distribution archive
ansible.builtin.unarchive:
src: https://dist.torproject.org/torbrowser/11.0.10/tor-browser-linux64-11.0.10_en-US.tar.xz
remote_src: true
dest: "{{ _torbrowser_tempdir.path }}"
- name: Ensure ~/.local/share/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/share
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Copy Tor Browser distribution to ~/.local/share/
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _torbrowser_tempdir.path }}/tor-browser_en-US/"
dest: "~{{ local_username }}/.local/share/tor-browser"
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Register application
become: true
become_user: "{{ local_username }}"
changed_when: true
ansible.builtin.command:
cmd: ./start-tor-browser.desktop
chdir: ~{{ local_username }}/.local/share/tor-browser
always:
- name: Delete temp dir
ansible.builtin.file:
path: "{{ _torbrowser_tempdir.path }}"
state: absent

View File

@@ -0,0 +1,40 @@
---
- name: Include access vars
ansible.builtin.include_vars:
file: vars/access.yaml
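# skylab_accounts (loaded from vars/access.yaml) gets filtered three ways below: accounts
# assigned to one of this host's targets, the human (non-service) subset, and the admin subset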
- name: Determine local user accounts
when: skylab_targets | intersect(item.targets | default([]))
vars:
_local_users: []
ansible.builtin.set_fact:
_local_users: "{{ _local_users + [item] }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.name }},{{ item.uid }}"
- name: Determine local human user accounts
when: not (item.service | default(false))
vars:
_local_human_users: []
ansible.builtin.set_fact:
_local_human_users: "{{ _local_human_users + [item.name] }}"
loop: "{{ _local_users }}"
loop_control:
label: "{{ item.name }},{{ item.uid }}"
- name: Determine local admin user accounts
when: item.admin | default(false)
vars:
_local_admin_users: []
ansible.builtin.set_fact:
_local_admin_users: "{{ _local_admin_users + [item.name] }}"
loop: "{{ _local_users }}"
loop_control:
label: "{{ item.name }},{{ item.uid }}"
- name: Install software
ansible.builtin.import_tasks: software.yml
- name: Configure environment
ansible.builtin.import_tasks: environment.yml

View File

@@ -0,0 +1,121 @@
---
- name: Install repositories on Fedora
become: true
when: ansible_distribution == "Fedora"
block:
- name: Install RPMFusion repositories
ansible.builtin.dnf:
name:
- https://mirrors.rpmfusion.org/free/fedora/rpmfusion-free-release-{{ ansible_distribution_major_version }}.noarch.rpm
- https://mirrors.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-{{ ansible_distribution_major_version }}.noarch.rpm
state: present
disable_gpg_check: true
- name: Install Docker CE repository
ansible.builtin.yum_repository:
name: docker-ce-stable
description: Docker CE Stable - $basearch
baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable
enabled: true
gpgcheck: true
gpgkey: https://download.docker.com/linux/fedora/gpg
- name: Install VSCode repository
ansible.builtin.yum_repository:
name: vscode
description: Visual Studio Code
baseurl: https://packages.microsoft.com/yumrepos/vscode
enabled: true
gpgcheck: true
gpgkey: https://packages.microsoft.com/keys/microsoft.asc
- name: Enable Signal-Desktop COPR repository
community.general.copr:
name: luminoso/Signal-Desktop
state: enabled
- name: Install packages on Fedora
become: true
when: ansible_distribution == "Fedora"
ansible.builtin.dnf:
name:
- arc-theme
- cmake
- code # visual studio code
- deluge
- docker-ce
- gcc
- gcc-c++
- gnome-tweaks
- gnome-shell-extension-material-shell
- gnome-shell-extension-openweather
- gnome-shell-extension-system-monitor-applet
- gnome-shell-extension-vertical-overview
- gnupg2
- guvcview
- java-17-openjdk
- jq
- libffi-devel
- libvirt
- libvirt-devel
- libxml2-devel
- mediawriter
- ncurses-devel
- NetworkManager-tui
- pinta
- powerline
- python27
- python36
- python37
- python38
- python39
- python310
- ShellCheck
- signal-desktop
- steam
- systemd-devel
- texlive-fontawesome5
- texlive-roboto
- texlive-scheme-tetex
- texlive-sourcesanspro
- virt-manager
- vlc
- xclip
- yarnpkg
state: present
- name: Install unsigned packages on Fedora
when: ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name:
# draw.io/diagrams.net
- https://github.com/jgraph/drawio-desktop/releases/download/v17.4.2/drawio-x86_64-17.4.2.rpm
# zoom
- https://zoom.us/client/latest/zoom_x86_64.rpm
state: present
disable_gpg_check: true
- name: Install MPW
ansible.builtin.import_tasks: install_mpw.yml
- name: Install NVM
ansible.builtin.import_tasks: install_nvm.yml
- name: Install pipx
ansible.builtin.import_tasks: install_pipx.yml
- name: Install Poetry
ansible.builtin.import_tasks: install_poetry.yml
- name: Install Postman
ansible.builtin.import_tasks: install_postman.yml
- name: Install Rustup
ansible.builtin.import_tasks: install_rustup.yml
- name: Install Typora
ansible.builtin.import_tasks: install_typora.yml
# It is now day eight hundred and thirty nine of begging the ansible devs to let
# me loop over blocks. pls bcoca i have a family
- name: Install Tor Browser
ansible.builtin.include_tasks:
file: install_tor_browser.yml
loop: "{{ _local_human_users }}"
loop_control:
loop_var: local_username
- name: Install MultiMC
ansible.builtin.include_tasks:
file: install_multimc.yml
loop: "{{ _local_human_users }}"
loop_control:
loop_var: local_username

View File

@@ -0,0 +1,9 @@
[Desktop Entry]
Name=MultiMC
Comment=Minecraft environment manager
Exec="/home/{{ local_username }}/.local/bin/MultiMC"
Terminal=false
Type=Application
Icon="/home/{{ local_username }}/.local/share/icons/multimc.png"
Categories=Gaming;Graphics;
TryExec="/home/{{ local_username }}/.local/bin/MultiMC"

3
skylab/infra/README.md Normal file
View File

@@ -0,0 +1,3 @@
# Ansible Collection - skylab.infra
Documentation for the collection.

16
skylab/infra/galaxy.yml Normal file
View File

@@ -0,0 +1,16 @@
namespace: skylab
name: infra
version: 0.0.0
description: Network deployment procedures and configuration state management
authors:
- Ethan Paul <me@enp.one>
license:
- MIT
readme: README.md
tags: []
repository: https://vcs.enp.one/skylab/skylab-ansible/
build_ignore: []
dependencies:
community.general: ">=6.5.0,<7.0"
ansible.posix: ">=1.5.1,<2.0"

Some files were not shown because too many files have changed in this diff.