Compare commits

..

1 Commits

Author SHA1 Message Date
49abac297d Reorganize provision playbook
Split server-specific configs out into server role
2021-11-05 21:53:47 -04:00
122 changed files with 1133 additions and 7829 deletions

View File

@@ -1,9 +0,0 @@
---
skip_list:
- line-length # don't yell about line length
- meta-no-info # we don't publish to galaxy so stop yelling about it
- package-latest # we install lots of latest stuff still 😢
- experimental # no instability plz, give us a call when ur stable
warn_list:
- no-handler # good to keep, but shouldn't be fatal

4
.gitignore vendored
View File

@@ -5,7 +5,3 @@ playbooks/testing.yml
*.idea
**/__pycache__/
.venv/
.ansible/
.tox/
.terraform/
.terraform.lock.*

View File

@@ -32,11 +32,3 @@ repos:
- "--wrap=90"
types:
- markdown
- id: terraform
name: terraform format
entry: terraform
language: system
args:
- fmt
files: ".*\\.tf$"

View File

@@ -1,7 +0,0 @@
---
yaml-files:
- "*.yml"
- "*.yaml"
rules:
line-length: disable

View File

@@ -1,9 +0,0 @@
clean:
rm --recursive --force .ansible/
rm --recursive --force .tox/
dev:
@poetry install --sync
@poetry run pre-commit install
@poetry run ansible-galaxy collection install --requirements-file ./requirements.yaml --collections-path ./.ansible
@bash ./link-local-collections.sh

View File

@@ -2,28 +2,6 @@
Ansible configs for the Skylab Homelab
## Local workstation setup:
```bash
make dev
poetry run ansible-playbook ...
```
## Bootstrapping a remote system for management:
1. Install a supported operating system: [Rocky Linux](https://rockylinux.org),
[Fedora](https://getfedora.org)
2. During installation create a user named `ansible` with any password
3. After installation copy SSH key to the `ansible` user
4. Enable password-less sudo access for the `ansible` user with this command:
```bash
sudo tee /etc/sudoers.d/30-ansible <<<"ansible ALL=(ALL) NOPASSWD:ALL"
```
5. Change the UID/GID of the `ansible` user/group to `1400` with these commands (a verification sketch follows these steps):
```bash
sudo usermod -u 1400 ansible
sudo groupmod -g 1400 ansible
```
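To confirm the bootstrap succeeded, a quick check along these lines can be run from the
workstation (`HOST` is a placeholder for the new machine's address):
```bash
# HOST is a placeholder; substitute the new machine's address.
# Confirms key-based login, the 1400 UID/GID, and passwordless sudo.
ssh ansible@HOST 'id && sudo --non-interactive true && echo "sudo OK"'
```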
The main entrypoint is the `ansible` wrapper script in this repository. The script sets a
few environment variables to avoid conflicts with other Ansible environments and pins the
inventory to the repository's `inventory.yaml`.
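As a usage sketch (assuming the Poetry environment puts the `ansible-*` commands on the
PATH), a playbook run through the wrapper looks like this:
```bash
# "playbook" selects ansible-playbook; the remaining arguments are passed
# through unchanged, with the inventory pinned to the repo's inventory.yaml.
poetry run ./ansible playbook playbooks/configure.yaml --limit cluster --check
```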

7
ansible Executable file
View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
ANSIBLE_LIBRARY='' \
ANSIBLE_FILTER_PLUGINS='' \
ANSIBLE_CONFIG='' \
ANSIBLE_INVENTORY=$(pwd)/inventory.yaml \
"ansible-$1" ${@:2}

View File

@@ -1,10 +1,8 @@
[defaults]
host_key_checking = true
collections_path = .ansible
inventory = inventory/
host_key_checking = false
[ssh_connection]
ssh_args = "-o ControlMaster=auto -o ControlPersist=60s"
ssh_args = "-o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"
[inventory]
enable_plugins = ansible.builtin.yaml

95
inventory.yaml Normal file
View File

@@ -0,0 +1,95 @@
---
all:
vars:
skylab_state_dir: /var/run/skylab
skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"
skylab_pip_version: 19.3.1
ansible_user: ansible
ansible_ssh_common_args: "-o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"
en1:
vars:
skylab_location: Newton MA
# gross hack for now, will be refactored later
_skylab_adguard_nat_rule: 8
hosts:
core:
ansible_host: 10.42.101.1
ansible_port: 4242
ansible_network_os: edgeos
skylab_description: EN1 Core Router
iridium:
ansible_host: 10.42.101.200
skylab_description: Local Monitor Node
skylab_targets: [network]
children:
cluster:
hosts:
pegasus: # jupiter
ansible_host: 10.42.101.100
skylab_description: Arbiter Node
skylab_targets: [cluster, datastore]
skylab_cluster:
address: 10.42.101.10/24
interface: bond0
saturn: # remus
ansible_host: 10.42.101.110
skylab_description: Operational Node
skylab_cluster:
address: 10.42.101.110/24
interface: bond0
skylab_networking:
hostname: saturn.skylab.enp.one
dns: [10.42.101.1]
gateway: 10.42.101.1/24
interfaces:
bond0:
type: bond
members: [eno1, eno2]
addresses:
- 10.42.101.11/24
- 10.42.101.110/24
dhcp: false
bond0.99:
type: vlan
address: 192.168.42.20/24
dhcp: false
orion: # romulus
ansible_host: 10.42.101.120
skylab_description: Operational Node
skylab_targets: [cluster, datastore]
skylab_cluster:
address: 10.42.101.12/24
interface: bond0
skylab_networking:
hostname: orion.skylab.enp.one
dns: [10.42.101.1]
gateway: 10.42.101.1/24
interfaces:
bond0:
type: bond
members: [eno1, eno2]
addresses:
- 10.42.101.12/24
- 10.42.101.120/24
dhcp: false
bond0.99:
type: vlan
address: 192.168.42.30/24
dhcp: false
en2:
vars:
skylab_location: DigitalOcean TOR1
hosts:
hubble:
ansible_host: en2a.enp.one
skylab_description: Cloud Web Server
skylab_targets: [cloud]

View File

@@ -1,166 +0,0 @@
---
workstation:
hosts:
voyager:
skylab_description: Personal Workstation
skylab_hostname: voyager.skylab.enp.one
skylab_targets: [workstation]
en1:
vars:
skylab_location: Newton MA
skylab_dashboard: info.en1.local
# gross hack for now, will be refactored later
_skylab_adguard_nat_rule: 9
hosts:
core:
ansible_host: 10.42.101.1
ansible_port: 4242
ansible_network_os: edgeos
skylab_description: EN1 Core Router
iridium:
ansible_host: 10.42.101.200
skylab_description: Local Monitor Node
skylab_hostname: iridium.skylab.enp.one
skylab_targets: [network]
skylab_networking:
enp4s0:
firewall: internal
dhcp: false
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.200/24
children:
cluster:
vars:
skylab_targets: [cluster, datastore]
skylab_compose_version: 3.8
skylab_compose_dir: "{{ skylab_state_dir }}/compose"
hosts:
pegasus: # jupiter
ansible_host: 10.42.101.100
skylab_hostname: pegasus.skylab.enp.one
skylab_legacy_names:
- jupiter.net.enp.one
- jupiter.svr.local
skylab_description: Arbiter Node
skylab_cluster:
address:
access: 10.42.101.10/24
internal: 192.168.42.10/24
interface:
access: bond0
internal: bond0.99
skylab_datastore_device: sdb
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.100/24
- 192.168.255.255/32
dhcp: false
bond0.99:
device: vlan
firewall: trusted
addresses:
- 192.168.42.10/24
dhcp: false
saturn: # remus
ansible_host: 10.42.101.110
skylab_hostname: saturn.skylab.enp.one
skylab_legacy_names:
- remus.net.enp.one
- remus.svr.local
skylab_description: Operational Node
skylab_cluster:
address:
access: 10.42.101.11/24
internal: 192.168.42.20/24
interface:
access: bond0
internal: bond0.99
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
dhcp: false
gateway: 10.42.101.1/24
addresses:
- 10.42.101.110/24
- 192.168.255.255/32
dns:
- 10.42.101.1
bond0.99:
device: vlan
firewall: trusted
dhcp: false
addresses:
- 192.168.42.20/24
orion: # romulus
ansible_host: 10.42.101.120
skylab_hostname: orion.skylab.enp.one
skylab_legacy_names:
- romulus.net.enp.one
- romulus.svr.local
skylab_description: Operational Node
skylab_cluster:
address:
access: 10.42.101.12/24
internal: 192.168.42.30/24
interface:
access: bond0
internal: bond0.99
skylab_datastore_device: sdb
skylab_networking:
eno1:
bond: bond0
eno2:
bond: bond0
bond0:
device: bond
firewall: internal
gateway: 10.42.101.1/24
dns:
- 10.42.101.1
addresses:
- 10.42.101.120/24
- 192.168.255.255/32
dhcp: false
bond0.99:
device: vlan
firewall: trusted
addresses:
- 192.168.42.30/24
dhcp: false
en2:
vars:
skylab_location: DigitalOcean TOR1
hosts:
hubble:
ansible_host: en2a.enp.one
skylab_hostname: hubble.en2.enp.one
skylab_description: Cloud Web Server
skylab_targets: [cloud]

View File

@@ -1,51 +0,0 @@
---
en1:
vars:
skylab_location: Cambridge
children:
domain:
children:
cluster:
hosts:
canaveral:
ansible_host: 10.42.101.10
skylab_description: Compute and Storage Node
baikonur:
ansible_host: 10.42.101.11
skylab_description: Compute and Storage Node
vandenberg:
ansible_host: 10.42.101.12
skylab_description: Compute and Storage Node
andoya:
ansible_host: 10.42.101.13
skylab_description: Auxiliary Compute Node
jiuquan:
ansible_host: 10.42.101.14
skylab_description: Auxiliary Compute Node
datastore:
hosts:
canaveral:
skylab_datastore_block: /dev/sda
baikonur:
skylab_datastore_block: /dev/sda
vandenberg:
skylab_datastore_block: /dev/sda
hosts:
3d-printer: {}
mediastore: {}
backstore: {}
local:
hosts:
core: {}
switch-1: {}
switch-2: {}
wap-1: {}
wap-2: {}
wap-3: {}
printer: {}

View File

@@ -1,39 +0,0 @@
---
ansible_user: ansible
ansible_port: 4242
skylab_state_dir: /var/lib/skylab
skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"
skylab_ansible_vault_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
61323762623165383963316238343539346336663864366631616339356564346636373561616237
6666363531393234636337656431366365343236346536320a346163353935366636303131313661
32623635363063383039363539303135393838376264356463646465376435616363376163373663
6366633665373939380a373234633365376632376433643034336539346338613566353537663731
34323464633165626133306464363464333539363761343831316565356266373833
skylab_tfstate_backend:
hostname: cluster.lab.enp.one
username: terraform
schema: terraform
port: 32421
password: !vault |
$ANSIBLE_VAULT;1.1;AES256
30313365393065316563323363663135313438616461356439366632303636343735653033363930
6334613931376566363064663539643639326363663933610a306138616362376435386466306538
30626330613932363339363438356430613461313335333536623931343436353330393433373630
3631343463616631380a386661336534663033383637666538316665303962353034376232356235
65323339353563623431666535366465353133343137653232326534326436323661636536373564
3466633762303966366366653531613261336561356531636461
skylab_mgmt:
sshport: 4242
group: skylab
user: ansible
id: 1400
sshkeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP5TGKururOa1Y+cbv8AWXYI5zhfZCDV0fsBG+33IYUc enpaul@ansible.voyager
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBf7i/8hSJDYnoD95noCJJVtSxxCp9N5EmnshALufiwm enpaul@ansible.opportunity

View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
PWD=$(pwd)
ANSIBLE_NAMESPACE="skylab"
ANSIBLE_COLLECTION_DIR="$PWD/.ansible/ansible_collections"
mkdir --parents "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE"
for collection_path in "$PWD"/"$ANSIBLE_NAMESPACE"/*; do
collection=$(basename "$collection_path")
if [[ ! -L "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection" ]]; then
echo "Linking $ANSIBLE_NAMESPACE.$collection into $ANSIBLE_COLLECTION_DIR"
rm --recursive --force "${ANSIBLE_COLLECTION_DIR:?}/$ANSIBLE_NAMESPACE/$collection"
ln --symbolic "$PWD/$ANSIBLE_NAMESPACE/$collection" "$ANSIBLE_COLLECTION_DIR/$ANSIBLE_NAMESPACE/$collection"
fi
done
echo "Finished linking local collections"
LOCAL_COLLECTION_PATH=$(dirname "$ANSIBLE_COLLECTION_DIR")
if [ -z ${ANSIBLE_COLLECTIONS_PATH+x} ]; then
echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH is not set, collections will not be callable"
echo " HINT: export ANSIBLE_COLLECTIONS_PATH=$LOCAL_COLLECTION_PATH"
elif [[ ${ANSIBLE_COLLECTIONS_PATH} != *"$LOCAL_COLLECTION_PATH"* ]]; then
echo "WARNING: Environment variable ANSIBLE_COLLECTIONS_PATH does not include local collection directory"
echo " HINT: export ANSIBLE_COLLECTIONS_PATH=\$ANSIBLE_COLLECTIONS_PATH:$LOCAL_COLLECTION_PATH"
fi

23
playbooks/configure.yaml Normal file
View File

@@ -0,0 +1,23 @@
---
- name: Group hosts by platform
hosts: all
tags:
- always
pre_tasks:
- include_tasks: tasks/meta/runtime-group-determination.yaml
- name: Bootstrap remote ansible environment
hosts: linux
gather_facts: false
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
- name: Configure hosts by role
hosts: linux
gather_facts: false
roles:
- role: server
when: "'server' in skylab_roles | default([])"

1
playbooks/files Symbolic link
View File

@@ -0,0 +1 @@
../resources

View File

@@ -2,9 +2,6 @@
- name: Check cluster state
hosts: cluster
any_errors_fatal: true
pre_tasks:
- name: Configure remote execution environment
ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
tasks:
- name: Validate user input
run_once: true
@@ -46,10 +43,10 @@
- name: Set common fact for node addresses
vars:
_node_addresses:
- "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address }}"
- "{{ lookup('vars', 'ansible_' + skylab_cluster.interface).ipv4.address }}"
ansible.builtin.set_fact:
_node_addresses: "{{ _node_addresses + [item.address] }}"
loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface).ipv4_secondaries }}"
loop_control:
label: "{{ item.address }}"
@@ -62,7 +59,7 @@
when: inventory_hostname != _target_node
ansible.builtin.assert:
that:
- skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
- skylab_cluster.address | ansible.netcommon.ipaddr('address') in _node_addresses
- _docker_node_availability | lower == 'active'
fail_msg: >-
ERROR: Node '{{ inventory_hostname }}' is already marked as unavailable. All cluster
@@ -118,15 +115,15 @@
- name: Delete address from node
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') in _node_addresses
when: skylab_cluster.address | ansible.netcommon.ipaddr('address') in _node_addresses
ansible.builtin.command:
cmd: ip address delete {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
cmd: ip address delete {{ skylab_cluster.address | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface }}
changed_when: true
- name: Assign address to alt node
delegate_to: "{{ _target_alt }}"
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('address') not in hostvars[_target_alt]._node_addresses
when: skylab_cluster.address | ansible.netcommon.ipaddr('address') not in hostvars[_target_alt]._node_addresses
ansible.builtin.command:
cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ hostvars[_target_alt].skylab_cluster.interface.access }}
cmd: ip address add {{ skylab_cluster.address | ansible.netcommon.ipaddr('host/prefix') }} dev {{ hostvars[_target_alt].skylab_cluster.interface }}
changed_when: true

View File

@@ -1,5 +1,5 @@
---
- ansible.builtin.import_playbook: skylab.core.node_down
- ansible.builtin.import_playbook: node-down.yaml
- name: Shutdown node
hosts: "{{ node }}"

View File

@@ -19,20 +19,23 @@
- name: Configure common settings
hosts: linux
gather_facts: false
vars_files:
- vars/packages.yaml
tasks:
- name: Set hostname
become: true
ansible.builtin.hostname:
name: "{{ skylab_hostname | default(inventory_hostname) }}"
name: "{{ inventory_hostname }}"
use: systemd
- name: Disable case-sensitive tab-completion
- name: Install global bash config
become: true
ansible.builtin.lineinfile:
line: set completion-ignore-case On
path: /etc/inputrc
state: present
create: true
ansible.builtin.copy:
src: global.sh
dest: /etc/profile.d/ZZ-skylab-global.sh
owner: root
group: "{{ ansible_user }}"
mode: 0644
- name: Install EPEL repository config
when: ansible_distribution == "Rocky"
@@ -49,8 +52,15 @@
state: present
key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}
- import_playbook: skylab.core.update
- name: Install universal packages
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.dnf:
name: "{{ skylab_packages_global + skylab_packages_rocky }}"
state: present
- import_playbook: skylab.core.configure
- import_playbook: update.yaml
- import_playbook: configure.yaml

1
playbooks/tasks Symbolic link
View File

@@ -0,0 +1 @@
../tasks

1
playbooks/templates Symbolic link
View File

@@ -0,0 +1 @@
../resources

View File

@@ -24,38 +24,12 @@
- vars/packages.yaml
tasks:
- name: Update system packages via DNF
when: ansible_distribution == "Rocky" or ansible_distribution == "Fedora"
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.dnf:
name: "*"
state: latest
- name: Install global bash config
become: true
ansible.builtin.copy:
src: global.sh
dest: /etc/profile.d/ZZ-skylab-global.sh
owner: root
group: "{{ ansible_user }}"
mode: 0644
- name: Install universal packages on Rocky
when: ansible_distribution == "Rocky"
become: true
ansible.builtin.dnf:
name: "{{ skylab_packages_global + skylab_packages_rocky }}"
state: present
update_cache: true
- name: Install universal packages on Fedora
when: ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name: "{{ skylab_packages_global + skylab_packages_fedora }}"
state: present
update_cache: true
- name: Update unix accounts
hosts: linux
tags:
@@ -84,55 +58,38 @@
cmd: 'grep "{{ skylab_group.name }}:" /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
register: _existing_skylab_accounts
- name: Determine deleted skylab users
vars:
_deleted_accounts: []
- name: Delete removed user accounts
become: true
when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
ansible.builtin.set_fact:
_deleted_accounts: "{{ _deleted_accounts + [item] }}"
ansible.builtin.user:
name: "{{ item }}"
state: absent
loop: "{{ _existing_skylab_accounts.stdout_lines }}"
- name: Delete accounts
when: _deleted_accounts | default(false)
block:
- name: Delete removed user accounts
become: true
ansible.builtin.user:
name: "{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Delete removed user groups
become: true
when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
ansible.builtin.group:
name: "{{ item }}"
state: absent
loop: "{{ _existing_skylab_accounts.stdout_lines }}"
- name: Delete removed user groups
become: true
ansible.builtin.group:
name: "{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Delete removed user home directories
become: true
ansible.builtin.file:
path: "/home/{{ item }}"
state: absent
loop: "{{ _deleted_accounts }}"
- name: Determine active users
when: item.targets | default([]) | intersect(skylab_targets)
vars:
_active_accounts: []
ansible.builtin.set_fact:
_active_accounts: "{{ _active_accounts + [item] }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Delete removed user home directories
become: true
when: item not in (skylab_accounts | items2dict(key_name='name', value_name='uid'))
ansible.builtin.file:
path: "/home/{{ item }}"
state: absent
loop: "{{ _existing_skylab_accounts.stdout_lines }}"
- name: Create account groups
when: item.targets | intersect(skylab_targets)
become: true
ansible.builtin.group:
name: "{{ item.name }}"
gid: "{{ item.uid }}"
state: present
loop: "{{ _active_accounts }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
@@ -140,16 +97,17 @@
ansible.builtin.set_fact:
_determined_member_groups: "{{ _determined_member_groups | default({}) | combine({item.name: [
skylab_group.name,
'wheel' if (item.admin | default(false) and ansible_os_family == 'RedHat') else '',
'wheel' if (item.admin | default(false) and ansible_distribution == 'Rocky') else '',
'sudo' if (item.admin | default(false) and ansible_os_family == 'Debian') else '',
skylab_group_admin.name if item.admin | default(false) else '',
skylab_group_automation.name if item.service | default(false) else '',
]}) }}"
loop: "{{ _active_accounts }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create accounts
when: item.targets | intersect(skylab_targets)
become: true
ansible.builtin.user:
name: "{{ item.name }}"
@@ -159,13 +117,9 @@
groups: "{{ _determined_member_groups[item.name] }}"
comment: "{{ item.fullname | default('') }}"
system: "{{ item.service | default(false) }}"
generate_ssh_key: true
ssh_key_bits: 4096
ssh_key_passphrase: "{{ item.password }}"
ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
ssh_key_type: ed25519
generate_ssh_key: false
password: "{{ item.password }}"
loop: "{{ _active_accounts }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
@@ -177,42 +131,7 @@
group: "{{ item.name }}"
owner: "{{ item.name }}"
mode: 0700
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create SSH directory
become: true
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh
owner: "{{ item.name }}"
group: "{{ item.name }}"
state: directory
mode: 0700
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Update authorized keys
become: true
ansible.builtin.authorized_key:
user: "{{ item.name }}"
key: "{{ skylab_ssh_keys[item.name] | join('\n') }}"
state: present
exclusive: true
loop: "{{ _active_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce ownership of authorized keys
become: true
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh/authorized_keys
state: file
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: 0400
loop: "{{ _active_accounts }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
@@ -222,3 +141,40 @@
name: root
password: "{{ skylab_root_password }}"
state: present
- name: Create SSH directory
become: true
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh
owner: "{{ item.name }}"
group: "{{ item.name }}"
state: directory
mode: 0700
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Update authorized keys
become: true
when: item.targets | intersect(skylab_targets)
ansible.builtin.authorized_key:
user: "{{ item.name }}"
key: "{{ skylab_ssh_keys[item.name] | join('\n') }}"
state: present
exclusive: true
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce ownership of authorized keys
become: true
when: item.targets | intersect(skylab_targets)
ansible.builtin.file:
path: /home/{{ item.name }}/.ssh/authorized_keys
state: file
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: 0400
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"

1
playbooks/vars Symbolic link
View File

@@ -0,0 +1 @@
../vars/

2620
poetry.lock generated

File diff suppressed because it is too large

View File

@@ -6,22 +6,22 @@ authors = ["Ethan Paul <me@enp.one>"]
license = "MIT"
[tool.poetry.dependencies]
python = "^3.10"
ansible-core = "^2.14.3"
docker = "^6.0.1"
python = "^3.8"
ansible = "^3.4.0"
docker = "^4.2.0"
docker-compose = "^1.25.4"
paramiko = "^2.7.1"
jsondiff = "^2.0.0"
jsondiff = "^1.2.0"
netaddr = "^0.8.0"
[tool.poetry.dev-dependencies]
ansible-lint = {version = "^6.14.0", markers = "platform_system != 'Windows'"}
ipython = "^8.11.0"
mdformat = "^0.7.16"
mdformat-gfm = "^0.3.5"
poetry = "^1.3.0"
pre-commit = "^3.2.0"
pre-commit-hooks = "^4.4.0"
safety = "^2.3.5"
ansible-lint = "^4.2.0"
pre-commit = "^2.9.2"
pre-commit-hooks = "^3.3.0"
safety = "^1.9.0"
tox = "^3.20.1"
tox-poetry-installer = {extras = ["poetry"], version = "^0.10.0"}
yamllint = "^1.29.0"
tox-poetry-installer = "^0.8.1"
yamllint = "^1.20.0"
mdformat = "^0.7.9"
mdformat-gfm = "^0.3.3"
ipython = "^7.28.0"

View File

@@ -1,4 +0,0 @@
---
collections:
- source: ./skylab/
type: subdirs

View File

@@ -2,12 +2,11 @@ function _parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[37m\]:\w\[\e[33m\]\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\[\e[0;97m\]$\[\e[0m\] "
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
export rc=/home/$USERNAME/.bashrc
export VIRTUALENV_DIR=/home/$USERNAME/.venvs
export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt
function random() {
random() {
if [[ $# -eq 0 ]]; then
num=32
else
@@ -20,10 +19,9 @@ function up() { cd $(eval printf '../'%.0s {1..$1}); }
function pipin() { pip freeze | grep $1; }
function continuous () { while true; do ${@}; sleep 3; done; }
alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias ls='ls -lshF --color --group-directories-first --time-style=long-iso'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
@@ -34,4 +32,3 @@ alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"
alias uptime="command uptime --pretty"
alias unmount="umount"

View File

@@ -0,0 +1 @@
%wheel ALL=(ALL) NOPASSWD: ALL

View File

@@ -0,0 +1,13 @@
---
- name: Disable sudo password for WHEEL group
when: ansible_distribution == "Rocky" or ansible_distribution == "CentOS"
become: true
ansible.builtin.copy:
src: wheel-group-no-sudo-password
dest: /etc/sudoers.d/30-wheel
owner: root
group: "{{ ansible_user }}"
mode: 0644
- name: Configure SSH server
ansible.builtin.import_tasks: sshd.yml

View File

@@ -7,16 +7,20 @@
replace: "{{ item.value }}"
notify: [restart-sshd]
loop:
- regex: "^.*PermitRootLogin (yes|no).*$"
- name: disable root login
regex: "^.*PermitRootLogin (yes|no).*$"
value: PermitRootLogin no
- regex: "^.*PasswordAuthentication (yes|no).*$"
- name: disable password auth
regex: "^.*PasswordAuthentication (yes|no).*$"
value: PasswordAuthentication no
- regex: "^.*ChallengeResponseAuthentication (yes|no).*$"
- name: disable challenge response auth
regex: "^.*ChallengeResponseAuthentication (yes|no).*$"
value: ChallengeResponseAuthentication no
- regex: "^.*GSSAPIAuthentication (yes|no).*$"
- name: disable GSSAPI auth
regex: "^.*GSSAPIAuthentication (yes|no).*$"
value: GSSAPIAuthentication no
loop_control:
label: "{{ item.value }}"
label: "{{ item.name }}"
- name: Disable dynamic MOTD on debian systems
when: ansible_os_family == "Debian"

View File

@@ -0,0 +1,11 @@
//////// /// /// /// /// /// /////// //////
/// /// /// /// /// /// /// /// /// ///
/// //////// /////// /// ///////// ///////
/////// /// /// /// /// /// /// /// ///
/// /// /// ///// /// /// /// ///////
/// ******************* /// ********************
////// /////////
> {{ skylab_description }} @{{ skylab_location }}
{{ '' }}

View File

@@ -1,3 +0,0 @@
# Ansible Collection - skylab.core
Documentation for the collection.

View File

@@ -1,26 +0,0 @@
---
namespace: skylab
name: core
version: 0.0.0
description: Network deployment procedures and configuration state management
authors:
- Ethan Paul <me@enp.one>
license:
- MIT
readme: README.md
tags: []
repository: https://vcs.enp.one/skylab/skylab-ansible/
build_ignore: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
ansible.netcommon: ">=2.5.0,<3.0.0"
ansible.posix: ">=1.3.0,<2.0.0"
ansible.utils: ">=2.4.3,<3.0.0"
community.docker: ">=2.0.2,<3.0.0"
community.network: ">=3.0.0,<4.0.0"
community.general: ">=4.1.0,<5.0.0"
community.crypto: ">=1.0.0,<2.0.0"

View File

@@ -1,2 +0,0 @@
---
requires_ansible: ">=2.11,<2.15"

View File

@@ -1,47 +0,0 @@
---
- name: Group hosts by platform
hosts: all
tags:
- always
pre_tasks:
- include_tasks: tasks/meta/runtime-group-determination.yaml
- name: Bootstrap remote ansible environment
hosts: linux
gather_facts: false
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
# [lemony snicket voice] "server" here being a word used to mean "not a workstation"
- name: Configure servers
hosts: linux:!workstation
gather_facts: false
roles:
- role: skylab.core.server
- name: Configure cluster
hosts: linux:&cluster
gather_facts: false
roles:
- role: skylab.core.datastore
- role: skylab.core.swarm
- name: Configure dashboard nodes
hosts: iridium
gather_facts: false
roles:
- role: skylab.core.dashboard
dashboard_hostname: "{{ skylab_dashboard }}"
- name: Configure workstations
hosts: workstation
gather_facts: false
roles:
- role: skylab.core.workstation

View File

@@ -1,200 +0,0 @@
---
- name: Bootstrap remote ansible environment
hosts: linux
tags:
- always
tasks:
- include_tasks: tasks/meta/bootstrap-remote-env.yaml
- name: Clean up old orchestration data
hosts: cluster
gather_facts: false
tags:
- cleanup
vars_files:
- vars/services.yaml
- vars/access.yaml
tasks:
- name: Create compose storage directory
become: true
ansible.builtin.file:
path: "{{ skylab_compose_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
- name: Fetch existing compose files
ansible.builtin.command:
cmd: /usr/bin/ls {{ skylab_compose_dir }}
changed_when: false
register: _compose_contents_raw
- name: Remove legacy compose files
when: item.replace('.yaml', '') not in skylab_services
ansible.builtin.file:
path: "{{ skylab_compose_dir }}/{{ item }}"
state: absent
loop: "{{ _compose_contents_raw.stdout_lines }}"
- name: Fetch existing stacks
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_stack_info: {}
register: _stack_info
- name: Remove legacy stacks
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
when: item.Orchestrator == 'Swarm' and item.Name not in skylab_services
community.docker.docker_stack:
name: "{{ item.Name }}"
state: absent
loop: "{{ _stack_info.results }}"
loop_control:
label: "{{ item.Name }}"
- name: Fetch existing Nginx configs
ansible.builtin.command:
cmd: ls {{ local_datastore_mount }}/appdata/nginx/conf.d/
changed_when: false
register: _nginx_configs
- name: Remove legacy nginx configs
when: item.replace('.conf', '') not in skylab_services
ansible.builtin.file:
path: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item }}.conf"
state: absent
loop: "{{ _nginx_configs.stdout_lines }}"
- name: Deploy stack service{{ (' ' + service) if service is defined else 's' }}
hosts: cluster
gather_facts: false
vars:
local_datastore_mount: /mnt/datastore
vars_files:
- vars/access.yaml
- vars/services.yaml
tasks:
- name: Validate user input
when: service is defined
ansible.builtin.assert:
that:
- service in skylab_services
- name: Determine service stacks to deploy
ansible.builtin.set_fact:
_services: "{{ {service: skylab_services[service]} if service is defined else skylab_services }}"
- name: Determine app account mapping
vars:
_service_accounts: {}
when: item.service | default(false)
ansible.builtin.set_fact:
_service_accounts: "{{ _service_accounts | combine({item.name: item}) }}"
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.name }}"
- name: Create compose directory
become: true
ansible.builtin.file:
path: "{{ skylab_compose_dir }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
- name: Install compose file
vars:
app: "{{ item.value }}"
_app_account: "{{ _service_accounts[item.value.user] if item.value.user is defined else false }}"
ansible.builtin.template:
src: docker-compose/{{ item.key }}.yaml.j2
dest: "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0660
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create automation groups
become: true
when: item.value.user is defined
ansible.builtin.group:
name: "{{ item.value.user }}"
gid: "{{ _service_accounts[item.value.user].uid }}"
state: present
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create automation accounts
become: true
when: item.value.user is defined
ansible.builtin.user:
name: "{{ item.value.user }}"
state: present
uid: "{{ _service_accounts[item.value.user].uid }}"
group: "{{ item.value.user }}"
groups: "{{ [skylab_group_automation.name, skylab_group.name] }}"
system: true
generate_ssh_key: false
password: "{{ _service_accounts[item.value.user].password }}"
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Configure datastore directories
run_once: true
block:
- name: Determine volume directories
vars:
_stack_volume_directories: []
when: item.value.volumes is defined
ansible.builtin.set_fact:
_stack_volume_directories: "{{ _stack_volume_directories + [{'user': (item.value.user | default(ansible_user)), 'volumes': (item.value.volumes.values() | list)}] }}"
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create service directories
become: true
ansible.builtin.file:
path: "{{ local_datastore_mount }}{{ item.1 }}"
state: directory
owner: "{{ item.0.user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0770
loop: "{{ _stack_volume_directories | subelements('volumes') }}"
- name: Deploy stack
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_stack:
name: "{{ item.key }}"
compose:
- "{{ skylab_compose_dir }}/{{ item.key }}.yaml"
prune: false
state: present
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Configure reverse proxy
run_once: true
block:
- name: Create nginx config
when: item.value.domain is defined
ansible.builtin.template:
src: stack-nginx.conf.j2
dest: "{{ local_datastore_mount }}/appdata/nginx/conf.d/{{ item.key }}.conf"
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0464
loop: "{{ _services | dict2items }}"
loop_control:
label: "{{ item.value.domain | default(item.key) }}"

View File

@@ -1,137 +0,0 @@
#!/usr/bin/env bash
set -o pipefail
declare FMT_RESET
declare FMT_BOLD
declare FMT_GREEN
declare FMT_RED
declare NL
FMT_RESET=$(printf "\\e[0m")
FMT_BOLD=$(printf "\\e[1m")
FMT_GREEN=$(printf "\\e[32m")
FMT_RED=$(printf "\\e[31m")
NL=$'\n'
readonly FMT_RESET
readonly FMT_BOLD
readonly FMT_GREEN
readonly FMT_RED
readonly NL
function usage() {
cat << __EOF__
${FMT_GREEN}$(basename "$0")${FMT_RESET}: \
Ping hosts and print status
${FMT_BOLD}Usage:${FMT_RESET}
$(basename "$0") [-h] [--service|--network]
${FMT_GREEN}-h --help${FMT_RESET}
Print this message and exit.
${FMT_GREEN}--services${FMT_RESET}
Report service status
${FMT_GREEN}--network${FMT_RESET}
Report network status
__EOF__
}
function _fmt_online() { echo "${FMT_BOLD}${FMT_GREEN}ONLINE${FMT_RESET}"; }
function _fmt_offline() { echo "${FMT_BOLD}${FMT_RED}OFFLINE${FMT_RESET}"; }
function _test_cmd() { if eval "$1" &>/dev/null ; then echo "${2}~$(_fmt_online)"; else echo "${2}~$(_fmt_offline)"; fi }
function _test_ping() { _test_cmd "ping -W 2 -c 1 ${1}" "${2}"; }
function _test_curl_head() { _test_cmd "curl --fail --head ${1}" "${2}"; }
function _test_curl_get() { _test_cmd "curl --fail --get ${1}" "${2}"; }
function _test_curl_insecure() { _test_cmd "curl --fail --head --insecure ${1}" "${2}"; }
function _test_netcat() { _test_cmd "nc -z ${1} ${2}" "${3}"; }
function network() {
local uplink_address="1.1.1.1"
declare -a infra=("core.en1.local" "switch.en1.local" "wap-1.en1.local" "wap-2.en1.local" "wap-3.en1.local" "printer.en1.local")
declare -a infra_names=("Core Router" "Core Switch" "Wireless AP 1" "Wireless AP 2" "Wireless AP 3" "Printer")
declare -a lab=("cluster.skylab.enp.one" "pegasus.skylab.enp.one" "saturn.skylab.enp.one" "orion.skylab.enp.one" "iridium.skylab.enp.one" "en2.enp.one")
declare -a lab_names=("Cluster" "Pegasus" "Saturn" "Orion" "Iridium" "Hubble")
local output=$(_test_ping "$uplink_address" "UPLINK")
output+="${NL}";
output+="${NL}INFRASTRUCTURE~STATE${NL}"
for (( index=0; index<"${#infra[@]}"; index++ )); do
output+=$(_test_ping "${infra[$index]}" "${infra_names[$index]}")
output+="${NL}"
done
output+="${NL}HOMELAB~STATE${NL}"
for (( index=0; index<"${#lab[@]}"; index++ )); do
output+=$(_test_ping "${lab[$index]}" "${lab_names[$index]}")
output+="${NL}"
done
column -e -t -s '~' <<<"$output"
}
function services() {
local output="INTERNAL~STATE${NL}"
output+=$(_test_netcat "cluster.skylab.enp.one" "53" "AdGuard DNS")
output+="${NL}"
output+=$(_test_netcat "core.en1.local" "53" "Fallback DNS")
output+="${NL}"
output+=$(_test_curl_insecure "https://cluster.skylab.enp.one:8443/status" "Ubiquiti WLC")
output+="${NL}"
output+="${NL}PUBLIC~STATE${NL}"
output+=$(_test_curl_head "https://pms.enp.one/web/index.html" "Plex Media Server")
output+="${NL}"
output+=$(_test_netcat "cluster.skylab.enp.one" "25565" "Minecraft Server")
output+="${NL}"
output+=$(_test_curl_get "https://vcs.enp.one/api/v1/version" "Version Control")
output+="${NL}"
output+=$(_test_curl_get "https://ssv.enp.one/api/alive" "Bitwarden")
output+="${NL}"
output+=$(_test_curl_head "https://cdn.enp.one/heartbeat" "Digital Ocean CDN")
output+="${NL}"
output+=$(_test_curl_head "https://doc.enp.one/" "Documentation")
output+="${NL}"
output+=$(_test_curl_head "https://enpaul.net/" "enpaul.net")
output+="${NL}"
output+=$(_test_curl_head "https://allaroundhere.org/" "allaroundhere.org")
output+="${NL}"
output+=$(_test_curl_head "https://enp.one/" "enp.one")
output+="${NL}"
column -e -t -s'~' <<<"$output"
}
function main() {
if [[ "$1" =~ ^(-h|--help)$ ]]; then
usage;
return 0
fi
if [[ "$1" = "--network" ]]; then
network;
return 0
fi
if [[ "$1" = "--services" ]]; then
services;
return 0
fi
}
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
main "${@}"
fi

View File

@@ -1,58 +0,0 @@
---
- name: Online nodes
hosts: cluster
vars_prompt:
- name: skylab_datastore_encryption_password
prompt: Enter datastore block decryption password
private: true
pre_tasks:
- name: Configure remote execution environment
ansible.builtin.import_tasks: tasks/meta/bootstrap-remote-env.yaml
roles:
- role: skylab.core.datastore
tasks:
- name: Fetch node swarm ID
ansible.builtin.command:
cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
changed_when: false
register: _docker_node_id_raw
- name: Update node availability
vars:
ansible_python_interpreter: "{{ skylab_state_dir }}/ansible-runtime/bin/python"
community.docker.docker_node:
availability: active
hostname: "{{ _docker_node_id_raw.stdout.strip() }}"
- name: Determine node addresses
vars:
_node_addresses:
- "{{ (lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.address + '/' + lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"
ansible.builtin.set_fact:
_node_addresses: "{{ _node_addresses + [(item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix')] }}"
loop: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.access).ipv4_secondaries }}"
loop_control:
label: "{{ (item.address + '/' + item.netmask) | ansible.netcommon.ipaddr('host/prefix') }}"
- name: Determine cluster access addresses
run_once: true
vars:
_cluster_node_ips: []
ansible.builtin.set_fact:
_cluster_node_ips: "{{ _cluster_node_ips + [hostvars[item].skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix')] }}"
loop: "{{ groups.cluster }}"
- name: Remove alternative node IPs
become: true
when: item in _cluster_node_ips and item != (skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix'))
ansible.builtin.command:
cmd: ip address delete {{ item | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
changed_when: true
loop: "{{ _node_addresses }}"
- name: Add node IP
become: true
when: skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') not in _node_addresses
ansible.builtin.command:
cmd: ip address add {{ skylab_cluster.address.access | ansible.netcommon.ipaddr('host/prefix') }} dev {{ skylab_cluster.interface.access }}
changed_when: true

View File

@@ -1,53 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
adguard:
name: adguard
driver: overlay
ipam:
driver: default
config:
- subnet: "{{ app.networks.ext }}"
volumes:
{% for key, value in app.volumes.items() %}
adguard-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: adguard/adguardhome:{{ app.versions.server }}
hostname: adguard
networks:
- adguard
dns:
{% for server in app.settings.upstream %}
- {{ server }}
{% endfor %}
ports:
- published: {{ app.ports.53 }}
target: 53
protocol: udp
mode: ingress
- published: {{ app.ports.53 }}
target: 53
protocol: tcp
mode: ingress
- published: {{ app.ports.8064 }}
target: 8064
protocol: tcp
mode: ingress
volumes:
- type: volume
source: adguard-config
target: /opt/adguardhome/conf
read_only: false
- type: volume
source: adguard-data
target: /opt/adguardhome/work
read_only: false
deploy:
replicas: 1

View File

@@ -1,214 +0,0 @@
---
version: "{{ skylab_compose_version }}"
x-global-env: &globalenv
LOCAL_UID: "{{ _app_account.uid }}"
LOCAL_GID: "{{ _app_account.uid }}"
ASPNETCORE_ENVIRONMENT: Production
globalSettings__selfHosted: "true"
globalSettings__baseServiceUri__vault: https://{{ app.publish.domain }}
globalSettings__baseServiceUri__api: https://{{ app.publish.domain }}/api
globalSettings__baseServiceUri__identity: https://{{ app.publish.domain }}/identity
globalSettings__baseServiceUri__admin: https://{{ app.publish.domain }}/admin
globalSettings__baseServiceUri__notifications: https://{{ app.publish.domain }}/notifications
globalSettings__baseServiceUri__internalNotifications: http://bitwarden_notifications:5000
globalSettings__baseServiceUri__internalAdmin: http://bitwarden_admin:5000
globalSettings__baseServiceUri__internalIdentity: http://bitwarden_identity:5000
globalSettings__baseServiceUri__internalApi: http://bitwarden_api:5000
globalSettings__baseServiceUri__internalVault: http://bitwarden_web:5000
globalSettings__pushRelayBaseUri: https://push.bitwarden.com
globalSettings__installation__identityUri: https://identity.bitwarden.com
globalSettings__sqlServer__connectionString: "Data Source=tcp:mssql,1433;Initial Catalog=vault;Persist Security Info=False;User ID=sa;Password=e934c0bb-3b5a-4e6b-b525-cd6d83004e1a;MultipleActiveResultSets=False;Connect Timeout=30;Encrypt=True;TrustServerCertificate=True"
globalSettings__identityServer__certificatePassword: {{ app.settings.certificatePassword }}
globalSettings__attachment__baseDirectory: /etc/bitwarden/core/attachments
globalSettings__attachment__baseUrl: https://{{ app.publish.domain }}/attachments
globalSettings__dataProtection__directory: /etc/bitwarden/core/aspnet-dataprotection
globalSettings__logDirectory: /etc/bitwarden/logs
globalSettings__licenseDirectory: /etc/bitwarden/core/licenses
globalSettings__internalIdentityKey: {{ app.settings.internalIdentityKey }}
globalSettings__duo__aKey: {{ app.settings.duo__aKey }}
globalSettings__installation__id: {{ app.settings.installation__id }}
globalSettings__installation__key: {{ app.settings.installation__key }}
globalSettings__yubico__clientId: REPLACE
globalSettings__yubico__key: REPLACE
globalSettings__mail__replyToEmail: noreply@enp.one
globalSettings__mail__smtp__host: REPLACE
globalSettings__mail__smtp__port: "587"
globalSettings__mail__smtp__ssl: "false"
globalSettings__mail__smtp__username: REPLACE
globalSettings__mail__smtp__password: REPLACE
globalSettings__disableUserRegistration: "false"
globalSettings__hibpApiKey: REPLACE
adminSettings__admins: ""
volumes:
{% for key, value in app.volumes.items() %}
bitwarden-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
networks:
bitwarden_internal:
internal: true
name: bitwarden_internal
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.internal }}
bitwarden_external:
internal: false
name: bitwarden_external
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.external }}
services:
mssql:
image: bitwarden/mssql:{{ app.versions.mssql }}
stop_grace_period: 60s
networks:
- bitwarden_internal
volumes:
- bitwarden-db-data:/var/opt/mssql/data
- bitwarden-db-backup:/etc/bitwarden/mssql/backups
- bitwarden-logs-db:/var/opt/mssql/log
environment:
LOCAL_UID: "{{ _app_account.uid }}"
LOCAL_GID: "{{ _app_account.uid }}"
ACCEPT_EULA: "Y"
MSSQL_PID: Express
SA_PASSWORD: {{ app.settings.SA_PASSWORD }}
deploy:
replicas: 1
web:
image: bitwarden/web:{{ app.versions.web }}
networks:
- bitwarden_internal
volumes:
- bitwarden-web:/etc/bitwarden/web
environment: *globalenv
deploy:
replicas: 1
attachments:
image: bitwarden/attachments:{{ app.versions.attachments }}
networks:
- bitwarden_internal
volumes:
- bitwarden-core:/etc/bitwarden/core
environment: *globalenv
deploy:
replicas: 1
api:
image: bitwarden/api:{{ app.versions.api }}
volumes:
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-api:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
identity:
image: bitwarden/identity:{{ app.versions.identity }}
volumes:
- bitwarden-identity:/etc/bitwarden/identity
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-identity:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
admin:
image: bitwarden/admin:{{ app.versions.admin }}
depends_on:
- mssql
volumes:
- bitwarden-core:/etc/bitwarden/core
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-admin:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
icons:
image: bitwarden/icons:{{ app.versions.icons }}
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-icons:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
notifications:
image: bitwarden/notifications:1.40.0
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-notifications:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
events:
image: bitwarden/events:{{ app.versions.events }}
volumes:
- bitwarden-ca-certs:/etc/bitwarden/ca-certificates
- bitwarden-logs-events:/etc/bitwarden/logs
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1
nginx:
image: bitwarden/nginx:{{ app.versions.nginx }}
depends_on:
- web
- admin
- api
- identity
ports:
- published: {{ app.ports.8080 }}
target: 8080
protocol: tcp
mode: ingress
- published: {{ app.ports.8443 }}
target: 8443
protocol: tcp
mode: ingress
volumes:
- bitwarden-nginx-data:/etc/bitwarden/nginx
- bitwarden-ssl:/etc/ssl
- bitwarden-logs-nginx:/var/log/nginx
environment: *globalenv
networks:
- bitwarden_external
- bitwarden_internal
deploy:
replicas: 1

View File

@@ -1,52 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
gitea:
name: gitea
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
gitea-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: gitea/gitea:{{ app.versions.server }}
hostname: gitea
networks:
- gitea
ports:
- published: {{ app.ports.3000 }}
target: 3000
protocol: tcp
mode: ingress
- published: {{ app.ports.22 }}
target: 22
protocol: tcp
mode: ingress
volumes:
- type: volume
source: gitea-data
target: /data
read_only: false
environment:
USER_UID: "{{ _app_account.uid }}"
USER_GID: "{{ _app_account.uid }}"
APP_NAME: ENP Version Control System
RUN_MODE: prod
ROOT_URL: https://{{ app.publish.domain }}/
DB_TYPE: sqlite3
DISABLE_REGISTRATION: "true"
deploy:
replicas: 1

View File

@@ -1,99 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
meta:
name: meta
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
meta-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
proxy:
image: nginx:{{ app.versions.proxy }}
hostname: proxy
networks:
- meta
extra_hosts:
- "dockerloopback:{{ app.settings.loopback_address }}"
ports:
- published: {{ app.ports.80 }}
target: 80
protocol: tcp
mode: ingress
- published: {{ app.ports.443 }}
target: 443
protocol: tcp
mode: ingress
volumes:
- type: volume
source: meta-nginx
target: /etc/nginx
read_only: true
- type: volume
source: meta-letsencrypt-config
target: /etc/letsencrypt
read_only: true
deploy:
replicas: 2
placement:
max_replicas_per_node: 1
certbot:
image: certbot/certbot:{{ app.versions.certbot }}
hostname: certbot
command: renew --standalone
networks:
- meta
ports:
- published: 8088 # This is hardcoded to avoid conflicts
target: 80
protocol: tcp
mode: ingress
volumes:
- type: volume
source: meta-letsencrypt-config
target: /etc/letsencrypt
read_only: false
- type: volume
source: meta-letsencrypt-data
target: /var/lib/letsencrypt
read_only: false
deploy:
replicas: 1
restart_policy:
condition: any
delay: 24h
backup:
image: rockylinux:latest
hostname: backup
command: bash /datastore/backup/mkbkup.sh /datastore/
networks:
- meta
volumes:
- type: volume
source: meta-backup
target: /datastore/backup
read_only: false
- type: volume
source: meta-appdata
target: /datastore/appdata
read_only: true
deploy:
replicas: 1
restart_policy:
condition: any
delay: 24h

View File

@@ -1,55 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
minecraft:
name: minecraft
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
minecraft-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
server:
image: itzg/minecraft-server:latest
hostname: minecraft
networks:
- minecraft
ports:
- published: {{ app.ports.25565 }}
target: 25565
protocol: tcp
mode: ingress
volumes:
- type: volume
source: minecraft-data
target: /data
read_only: false
environment:
EULA: "TRUE"
TZ: America/New_York
VERSION: {{ app.versions.server }}
MAX_MEMORY: "8G"
MOTD: "A home for buttery companions"
MODE: survival
OPS: {{ app.settings.admins | default([]) | join(',') }}
WHITELIST: "{{ app.settings.users | default([]) | join(',') }}"
MAX_BUILD_HEIGHT: "512"
SNOOPER_ENABLED: "false"
ICON: https://cdn.enp.one/img/logos/e-w-sm.png
ENABLE_RCON: "false"
UID: "{{ _app_account.uid }}"
GID: "{{ _app_account.uid }}"
deploy:
replicas: 1

View File

@@ -1,113 +0,0 @@
---
version: '3.7'
volumes:
photoprism-database:
name: datastore/appdata/photoprism/database
driver: glusterfs
photoprism-metadata:
name: datastore/appdata/photoprism/metadata
photoprism-originals:
name: datastore/media/photoprism
driver: glusterfs
photoprism-import:
name: datastore/media/upload
driver: glusterfs
networks:
photoprism:
internal: true
name: photoprism
driver: overlay
ipam:
driver: default
config:
- subnet: 192.168.109.0/24
services:
app:
image: photoprism/photoprism:latest
hostname: app
depends_on:
- database
networks:
- photoprism
ports:
- published: 2342
target: 2342
protocol: tcp
mode: ingress
environment:
PHOTOPRISM_ADMIN_PASSWORD: "gm2auW34GNawZ8Dqiub8W8vOlvsHCnfj"
PHOTOPRISM_SITE_URL: "http://cluster.skylab.enp.one:2342/"
PHOTOPRISM_ORIGINALS_LIMIT: 5000
PHOTOPRISM_HTTP_COMPRESSION: "gzip"
PHOTOPRISM_DEBUG: "false"
PHOTOPRISM_PUBLIC: "false"
PHOTOPRISM_READONLY: "false"
PHOTOPRISM_EXPERIMENTAL: "false"
PHOTOPRISM_DISABLE_CHOWN: "false"
PHOTOPRISM_DISABLE_WEBDAV: "false"
PHOTOPRISM_DISABLE_SETTINGS: "false"
PHOTOPRISM_DISABLE_TENSORFLOW: "false"
PHOTOPRISM_DISABLE_FACES: "false"
PHOTOPRISM_DISABLE_CLASSIFICATION: "false"
PHOTOPRISM_DARKTABLE_PRESETS: "false"
PHOTOPRISM_DETECT_NSFW: "false"
PHOTOPRISM_UPLOAD_NSFW: "true"
PHOTOPRISM_DATABASE_DRIVER: "mysql"
PHOTOPRISM_DATABASE_SERVER: "database:3306"
PHOTOPRISM_DATABASE_NAME: "photoprism"
PHOTOPRISM_DATABASE_USER: "photoprism"
PHOTOPRISM_DATABASE_PASSWORD: "KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp" # MariaDB or MySQL database user password
PHOTOPRISM_SITE_TITLE: "Skylab Images"
PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
PHOTOPRISM_SITE_DESCRIPTION: ""
PHOTOPRISM_SITE_AUTHOR: "EN Paul"
HOME: "/photoprism"
PHOTOPRISM_UID: 1408
PHOTOPRISM_GID: 1408
## Hardware video transcoding config (optional)
# PHOTOPRISM_FFMPEG_BUFFERS: "64" # FFmpeg capture buffers (default: 32)
# PHOTOPRISM_FFMPEG_BITRATE: "32" # FFmpeg encoding bitrate limit in Mbit/s (default: 50)
# PHOTOPRISM_FFMPEG_ENCODER: "h264_v4l2m2m" # Use Video4Linux for AVC transcoding (default: libx264)
# PHOTOPRISM_FFMPEG_ENCODER: "h264_qsv" # Use Intel Quick Sync Video for AVC transcoding (default: libx264)
# PHOTOPRISM_INIT: "intel-graphics tensorflow-amd64-avx2" # Enable TensorFlow AVX2 & Intel Graphics support
## Enable TensorFlow AVX2 support for modern Intel CPUs (requires starting the container as root)
# PHOTOPRISM_INIT: "tensorflow-amd64-avx2"
user: "1408:1408"
working_dir: "/photoprism"
volumes:
- type: volume
source: photoprism-originals
target: /photoprism/originals
read_only: false
- type: volume
source: photoprism-metadata
target: /photoprism/storage
read_only: false
- type: volume
source: photoprism-import
target: /photoprism/import
read_only: true
deploy:
replicas: 1
database:
image: mariadb:10.6
hostname: database
command: mysqld --innodb-buffer-pool-size=128M --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=120
networks:
- photoprism
volumes:
- type: volume
source: photoprism-database
target: /var/lib/mysql
read_only: false
environment:
MYSQL_ROOT_PASSWORD: insecure
MYSQL_DATABASE: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: KcIKhME9OwWKVz4tGyqI4VXzyDBs33Xp
deploy:
replicas: 1

View File

@@ -1,95 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
plex:
name: plex
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
plex-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
plex-data:
name: plex-data
driver: local
driver_opts:
type: nfs
o: "addr={{ app.settings.mediastore }},ro"
device: ":/nfs/plex"
services:
server:
image: plexinc/pms-docker:{{ app.versions.server }}
hostname: plex-media-server
networks:
- plex
ports:
- published: {{ app.ports.32400 }}
target: 32400
protocol: tcp
mode: ingress
- published: {{ app.ports.3005 }}
target: 3005
protocol: tcp
mode: ingress
- published: {{ app.ports.8324 }}
target: 8324
protocol: tcp
mode: ingress
- published: {{ app.ports.32469 }}
target: 32469
protocol: tcp
mode: ingress
- published: {{ app.ports.1900 }}
target: 1900
protocol: udp
mode: ingress
- published: {{ app.ports.32410 }}
target: 32410
protocol: udp
mode: ingress
- published: {{ app.ports.32413 }}
target: 32413
protocol: udp
mode: ingress
- published: {{ app.ports.32414 }}
target: 32414
protocol: udp
mode: ingress
volumes:
- type: volume
source: plex-config
target: /config
read_only: false
- type: volume
source: plex-data
target: /data
read_only: true
- type: volume
source: plex-personal
target: /personal
read_only: false
environment:
TZ: "Americas/New_York"
ALLOWED_NETWORKS: {{ app.settings.internal_subnets | join(',') }}
PLEX_UID: "{{ _app_account.uid }}"
PLEX_GID: "{{ _app_account.uid }}"
deploy:
replicas: 1
placement:
{% if app.settings.exclude_hosts is defined %}
constraints:
{% for host in app.settings.exclude_hosts %}
- node.hostname!={{ host }}
{% endfor %}
{% endif %}

View File

@@ -1,70 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
unifi:
name: unifi
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
unifi-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
wlc:
image: jacobalberty/unifi:{{ app.versions.wlc }}
hostname: en1-unifi-wlc
init: true
networks:
- unifi
ports:
- published: {{ app.ports.8080 }}
target: 8080
protocol: tcp
mode: ingress
- published: {{ app.ports.8443 }}
target: 8443
protocol: tcp
mode: ingress
- published: {{ app.ports.8843 }}
target: 8843
protocol: tcp
mode: ingress
- published: {{ app.ports.8880 }}
target: 8880
protocol: tcp
mode: ingress
- published: {{ app.ports.3478 }}
target: 3478
protocol: udp
mode: ingress
- published: {{ app.ports.6789 }}
target: 6789
protocol: tcp
mode: ingress
- published: {{ app.ports.10001 }}
target: 10001
protocol: udp
mode: ingress
volumes:
- type: volume
source: unifi-data
target: /unifi
read_only: false
environment:
RUNAS_UID0: "false"
UNIFI_UID: "{{ _app_account.uid }}"
UNIFI_GID: "{{ _app_account.uid }}"
TZ: "Americas/New_York"
deploy:
replicas: 1

View File

@@ -1,108 +0,0 @@
---
version: "{{ skylab_compose_version }}"
networks:
vikunja:
name: vikunja
driver: overlay
ipam:
driver: default
config:
- subnet: {{ app.networks.ext }}
volumes:
{% for key, value in app.volumes.items() %}
vikunja-{{ key }}:
name: datastore{{ value }}
driver: glusterfs
{% endfor %}
services:
database:
image: mariadb:{{ app.versions.database }}
hostname: database
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
networks:
- vikunja
volumes:
- type: volume
source: vikunja-database
target: /var/lib/mysql
read_only: false
environment:
MYSQL_RANDOM_ROOT_PASSWORD: "true"
MYSQL_USER: vikunja
MYSQL_PASSWORD: {{ app.settings.database_password }}
MYSQL_DATABASE: vikunja
deploy:
replicas: 1
cache:
image: redis:{{ app.versions.cache }}
hostname: cache
networks:
- vikunja
deploy:
replicas: 1
proxy:
image: nginx:{{ app.versions.proxy }}
hostname: proxy
networks:
- vikunja
ports:
- published: {{ app.ports.80 }}
target: 80
protocol: tcp
mode: ingress
volumes:
- type: volume
source: vikunja-nginx
target: /etc/nginx/conf.d
read_only: true
deploy:
replicas: 1
api:
image: vikunja/api:{{ app.versions.api }}
hostname: api
networks:
- vikunja
depends_on:
- database
- cache
volumes:
- type: volume
source: vikunja-files
target: /app/vikunja/files
read_only: false
environment:
VIKUNJA_DATABASE_HOST: database
VIKUNJA_DATABASE_PASSWORD: {{ app.settings.database_password }}
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_REDIS_ENABLED: "1"
VIKUNJA_REDIS_HOST: cache:6379
VIKUNJA_CACHE_ENABLED: "1"
VIKUNJA_CACHE_TYPE: redis
VIKUNJA_FILES_MAXSIZE: 50MB
deploy:
replicas: 1
web:
image: vikunja/frontend:{{ app.versions.web }}
hostname: web
networks:
- vikunja
depends_on:
- database
- cache
- proxy
environment:
VIKUNJA_API_URL: https://{{ app.publish.domain }}/api/v1
deploy:
replicas: 1

View File

@@ -1,34 +0,0 @@
# Ansible managed file - do not manually edit
#
server {
server_name {{ app.publish.domain }};
root /usr/share/nginx/html;
location / {
proxy_pass http://dockerloopback:{{ app.publish.http }}/;
proxy_set_header Host $host;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/{{ app.publish.domain }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ app.publish.domain }}/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
}
server {
listen 80;
listen [::]:80;
server_name {{ app.publish.domain }};
location ^~ /.well-known/acme-challenge/ {
proxy_pass http://dockerloopback:8088/.well-known/acme-challenge/;
proxy_set_header Host $host;
}
location / {
return 301 https://$host$request_uri;
}
}
# EOF

View File

@@ -1,64 +0,0 @@
---
skylab_accounts:
- name: enpaul
uid: 1300
fullname: Ethan N. Paul
targets: [network, datastore, cluster, cloud, workstation]
admin: true
password: $6$H7jZEL2Ey93zfMTD$CzUlZkXDudPHgUMU/OFUn8/Yhzo8nBxoSI8thD15toIFlWN.iUfq/Jp5z3KpDCGTxyv.IbRTvE8dOVWjoRfgJ.
- name: ansible
uid: 1400
targets: [network, datastore, cluster, cloud, workstation]
admin: true
service: true
password: $6$qNKmYg4y9YS4f5Gr$m0mAqEVbymPguj.1cS.pfclt33Okfmn1KhFC0r1iQ3eVvz/OIZY3x0qGmPnJ1zOXDWyKKs5hnlGTAeZgCh49C.
- name: autocraft
uid: 1401
service: true
password: $6$j8aWjPweCWmNT4cZ$F7puAjVES60a5mkBg1WfYMYIGbLrs8FFowf5BPzVo9qgbMYEC53i8rHezDACZjHmOxj5PhJkmZjHD4vfbf3PC1
- name: autotea
uid: 1402
service: true
password: $6$NdUiEi2P8TI.2jgb$ai1WbWno8QK6Wg/KAV4IacwG92FctN5aQX2i45a9DSCg8k1nkwGj5gc230FXePh8P7vzQ6ifYrYsAvEfZ1J8B.
- name: autowarden
uid: 1403
service: true
password: $6$a18IBPw40.ELiuy5$RbGfNGUe9iTA6bOaKLsp7q3X3uQ3D1LP8LAF5ioQAafimVvCtwyG.O4Colo9nsl2yoCF6TMIHX9zOTavkbg7W0
- name: autoguard
uid: 1404
service: true
password: $6$PLf4ifrrk0/5AF21$ohZXy0xDzyUiHXwoCW8zvbrPHFiWSWxYx2.QlDy09wND7RbPhwxghHS0trWWVdy14jAxU45mz5YvdAl7qmEIO0
- name: autoplex
uid: 1405
service: true
password: $6$VfMCenzm5UPHrpNN$yQIpnQUZPhO9UoSylaOxR6TOrJfR5dFdzdRFzle7dP/bfeDBKxC6hsy52IEowilL.aCbYevz67R9s1hB3q9GU1
- name: autounifi
uid: 1406
service: true
password: $6$ScrKQGmAifGVDovx$wuQQhEdNya8Tjj/oTeY/eT1grLl17hSYbVeWIIqU.jv.W9vFyoVkxeE/lBcPvBe8XdGjOxWu32WsnomL8irb11
- name: autollama
uid: 1407
service: true
password: $6$lEdCBbqlWIdHuRZZ$Pr9SAybk7uCTfzjtCpSe7RrwM2TKqr8vWtLDARZRQ9e1RpNKHP2bEvkeg2VPc7oACVfxbg7Y8PP0rKOR.3fcD.
skylab_group:
name: skylab
gid: 1200
skylab_group_admin:
name: skylab_admin
gid: 1201
skylab_group_automation:
name: skylab_auto
gid: 1202
skylab_root_password: $6$FDwVi2DUVPg.LSrC$vRMIW6ah0x5cSZFLDrV2FuiwoUtYgcnJJV06gn2HxLsUnkXJ0/Sv1hjRn8v6bZy1AmkDCyQCtT6DHRRBuQspx.

View File

@@ -1,240 +0,0 @@
---
skylab_services:
meta:
networks:
ext: 192.168.99.0/24
volumes:
nginx: /appdata/nginx
letsencrypt-config: /appdata/letsencrypt/config
letsencrypt-data: /appdata/letsencrypt/data
ports:
80: 80
443: 443
versions:
proxy: latest
certbot: latest
settings:
loopback_address: 192.168.255.255
minecraft:
user: autocraft
networks:
ext: 192.168.102.0/24
volumes:
data: /appdata/minecraft
ports:
25565: 25565
versions:
server: 1.16.5
publish:
domain: mcs.enp.one
settings:
admins:
- ScifiGeek42
users:
- ScifiGeek42
- fantasycat256
- CoffeePug
- Snowdude21325
- KaiserSJR
- glutenfreebean
gitea:
user: autotea
networks:
ext: 192.168.103.0/24
volumes:
data: /appdata/gitea
ports:
3000: 3000
22: 2222
publish:
domain: vcs.enp.one
http: 3000
versions:
server: 1.15.4
bitwarden:
user: autowarden
networks:
internal: 192.168.104.0/24
external: 192.168.105.0/24
volumes:
db-data: /appdata/bitwarden/mssql/data
db-backup: /appdata/bitwarden/mssql/backup
nginx-data: /appdata/bitwarden/nginx
web: /appdata/bitwarden/web
ssl: /appdata/bitwarden/ssl
ca-certs: /appdata/bitwarden/ca-certificates
core: /appdata/bitwarden/core
identity: /appdata/bitwarden/identity
logs-api: /appdata/bitwarden/logs/api
logs-db: /appdata/bitwarden/logs/mssql
logs-identity: /appdata/bitwarden/logs/identity
logs-nginx: /appdata/bitwarden/logs/nginx
logs-admin: /appdata/bitwarden/logs/admin
logs-icons: /appdata/bitwarden/logs/icons
logs-notifications: /appdata/bitwarden/logs/notifications
logs-events: /appdata/bitwarden/logs/events
ports:
8080: 8090
8443: 8943
versions:
mssql: 1.40.0
web: 2.19.0
attachments: 1.40.0
api: 1.40.0
identity: 1.40.0
admin: 1.40.0
icons: 1.40.0
events: 1.40.0
nginx: 1.40.0
publish:
domain: ssv.enp.one
http: 8090
settings:
certificatePassword: !vault |
$ANSIBLE_VAULT;1.1;AES256
34336462333965626665636664636338353139306135393862656539623935666134666638313632
6337393734353237373233663763666566316637393436650a346134353365626637313732346565
64373866633430613637663230383866336362313739313335646330373666353536396463376364
3730306338623831300a346565613730326138333732306237333236393237653363386263376531
30653663346234383538316337386534356534316437323561646637636361396462393335316233
3931623037626539636535353963666635316334613833396437
internalIdentityKey: !vault |
$ANSIBLE_VAULT;1.1;AES256
64343365323264303635306461386464626535343138333637333035343365386138363261666561
3036376532316230326238626662663434343131393336350a363230333637373231333332356230
66383466626139396365333865663538386130633136643861353936613330613535313363323639
6538656632376330380a373534393361613234366536353866353366646263643565346534393235
30623261626364613063353839663130656436316531666431316332653330636436323331316462
3539383064363338313433343837363563313838333231363639
duo__aKey: !vault |
$ANSIBLE_VAULT;1.1;AES256
38353861643436373461393663616366383139393164366664303333333431663364613530323532
3434643335353964613464393734623934313164663339340a303831353734623332316464333735
34343961393562366435653935313038336638623061353761343538333264386638306363386463
3339346561333039650a353163633263386232646366323637383866303033356631376639383561
36316333336434393364316565353363623036613233633933616532376530653138366432303762
6532343435636261353434323461646365396538646466353032
installation__id: !vault |
$ANSIBLE_VAULT;1.1;AES256
62323837366638363735393462326566633235356261326636623239366462316465636163663063
3065613765386138653239383332306363346236666662660a313634333334396633646465356435
66666231633938613838663364323331666434383439303931393761313563663931386532336330
6433383331643933610a323565636462663865666435376334346535323964663264363039623364
32653966363634376534383664663535373830366466336463613365653463363663316165303330
3834653063653334313931643330663163386638363938643130
installation__key: !vault |
$ANSIBLE_VAULT;1.1;AES256
38353130336136623437653131316461653561393539373630623135383036643135623361613735
6431306133623866613836363361376163656434343230660a663635393861333863376461336661
30386562353730326665323030393531663234373430363639306562633031363065386665646431
3163633239366630300a313436386131376433333231346135393735373236626365393533626232
61313536323437363234396536623662613434333363326565303939363562353732
SA_PASSWORD: !vault |
$ANSIBLE_VAULT;1.1;AES256
64313236346631366338313139396532346461333835616466313037363132656632323566663138
6665393239656262363261303362303437343438626234340a663836623362353431373035356562
61383865303835323336363862303035363161376336346563323966633361333966363232393665
6166323331353065380a616138303531643063653633656561383761393433646130656432363436
62383763316130306235396338356236636263653830666139663064626633643635386237373034
3465323836373437383465316537666337373134616135626238
adguard:
user: autoguard
networks:
ext: 192.168.108.0/24
volumes:
config: /appdata/adguard/config
data: /appdata/adguard/data
ports:
53: 53
8064: 8084
versions:
server: v0.106.3
publish:
domain: adguard.en1.local
http: 8064
settings:
upstream:
- 1.1.1.1
- 1.0.0.1
plex:
user: autoplex
networks:
ext: 192.168.101.0/24
volumes:
config: /appdata/plex
ports:
32400: 32400
3005: 3005
8324: 8324
32469: 32469
1900: 1900
32410: 32410
32413: 32413
32414: 32414
versions:
server: latest
publish:
domain: pms.enp.one
http: 32400
settings:
mediastore: mediastore.skylab.enp.one
internal_subnets:
- 10.42.100.0/24
- 10.42.101.0/24
exclude_hosts:
- jupiter.net.enp.one
- pegasus.skylab.enp.one
unifi:
user: autounifi
networks:
ext: 192.168.100.0/24
volumes:
data: /appdata/unifi
ports:
8080: 8080
8443: 8443
8843: 8843
8880: 8880
3478: 3478
6789: 6789
10001: 10001
versions:
wlc: "6.2"
publish:
domain: unifi.en1.local
http: 8080
vikunja:
user: autollama
networks:
ext: 192.168.107.0/24
volumes:
database: /appdata/vikunja/database
files: /appdata/vikunja/files
nginx: /appdata/vikunja/nginx
ports:
80: 8087
versions:
database: "10"
cache: latest
proxy: latest
api: 0.18.1
web: 0.18.1
publish:
domain: org.enp.one
http: 8087
settings:
database_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
35313866386161376430383232343834633566363136323761316531663633383231653135313565
6332336461356164623237306436393131383566656233640a316262616161336331356565363963
35313430303237313039346162653564623236373564306333393362623134613437656231633635
6334616138663036610a646234366264646363353635356338633035373166343763353733336339
38663937383165386530326138363965626666386366636330343133633238636236316432613136
6662313533316563646461646336396430306466323831613730

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +0,0 @@
[grafana]
name=grafana
baseurl=https://packages.grafana.com/enterprise/rpm
repo_gpgcheck=1
enabled=1
gpgcheck=1
gpgkey=https://packages.grafana.com/gpg.key
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt

View File

@@ -1,22 +0,0 @@
# Ansible managed file - DO NOT EDIT
#
# https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-16-04
#
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 valid=300s;
resolver_timeout 5s;
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
ssl_dhparam /etc/nginx/ssl-dhparam.pem;
# EOF

View File

@@ -1,12 +0,0 @@
---
- name: restart-nginx
become: true
ansible.builtin.systemd:
name: nginx
state: restarted
- name: restart-grafana
become: true
ansible.builtin.systemd:
name: grafana-server
state: restarted

View File

@@ -1,48 +0,0 @@
---
- name: Install Grafana Enterprise repository
become: true
ansible.builtin.copy:
src: grafana.repo
dest: /etc/yum.repos.d/grafana.repo
owner: root
group: "{{ ansible_user }}"
mode: 0644
register: _grafana_repo
- name: Install Grafana repository GPG key
become: true
ansible.builtin.rpm_key:
state: present
key: https://packages.grafana.com/gpg.key
- name: Install Grafana
become: true
ansible.builtin.dnf:
name: grafana
state: present
update_cache: "{{ _grafana_repo.changed }}"
- name: Enable and start Grafana
become: true
ansible.builtin.systemd:
name: grafana-server
state: started
enabled: true
- name: Fetch installed grafana plugins
become: true
ansible.builtin.command:
cmd: grafana-cli plugins ls
changed_when: false
register: _grafana_plugins_raw
- name: Install plugins
become: true
ansible.builtin.command:
cmd: grafana-cli plugins install {{ item }}
changed_when: item not in _grafana_plugins_raw.stdout
notify: [restart-grafana]
loop:
- marcusolsson-json-datasource
- grafana-clock-panel
- ayoungprogrammer-finance-datasource

View File

@@ -1,6 +0,0 @@
---
- name: Install and configure Grafana
ansible.builtin.import_tasks: grafana.yaml
- name: Install and configure Nginx
ansible.builtin.import_tasks: nginx.yaml

View File

@@ -1,107 +0,0 @@
---
- name: Install nginx
become: true
ansible.builtin.dnf:
name: nginx
state: present
- name: Enable and start nginx
become: true
ansible.builtin.systemd:
name: nginx
state: started
enabled: true
- name: Configure firewall for Nginx
become: true
ansible.posix.firewalld:
service: "{{ item }}"
state: enabled
zone: internal
permanent: true
immediate: true
loop:
- http
- https
- name: Configure SELinux for Nginx
when: ansible_selinux.status | default("") == "enabled"
become: true
ansible.posix.seboolean:
name: httpd_can_network_connect
state: true
persistent: true
notify: [restart-nginx]
- name: Create certificate directory
become: true
ansible.builtin.file:
path: "{{ dashboard_certificate_directory }}"
state: directory
owner: nginx
group: "{{ ansible_user }}"
mode: 0570
- name: Generate X509 private key
become: true
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.crypto.openssl_privatekey:
path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
type: RSA
size: 8192
passphrase: "{{ dashboard_certificate_password }}"
cipher: auto
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
- name: Install private key password file
become: true
ansible.builtin.copy:
content: "{{ dashboard_certificate_password }}"
dest: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password"
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
- name: Create self-signed certificate
become: true
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.crypto.x509_certificate:
path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem"
privatekey_path: "{{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key"
privatekey_passphrase: "{{ dashboard_certificate_password }}"
provider: selfsigned
owner: nginx
group: "{{ ansible_user }}"
mode: 0460
notify: [restart-nginx]
- name: Copy nginx SSL parameters
become: true
ansible.builtin.copy:
src: ssl-options.conf
dest: /etc/nginx/ssl-options.conf
owner: nginx
group: "{{ ansible_user }}"
mode: 0664
notify: [restart-nginx]
- name: Export Diffie-Hellman parameters
become: true
ansible.builtin.command:
cmd: openssl dhparam -out /etc/nginx/ssl-dhparam.pem 2048
creates: /etc/nginx/ssl-dhparam.pem
notify: [restart-nginx]
- name: Configure nginx server
become: true
ansible.builtin.template:
src: nginx.conf.j2
dest: /etc/nginx/conf.d/{{ dashboard_hostname }}.conf
owner: nginx
group: "{{ ansible_user }}"
mode: 0444
notify: [restart-nginx]

View File

@@ -1,29 +0,0 @@
# Ansible managed file - DO NOT MANUALLY EDIT
#
server {
server_name {{ dashboard_hostname }};
root /usr/share/nginx/html;
location / {
proxy_pass http://127.0.0.1:3000/;
proxy_set_header Host $host;
}
listen 443 ssl http2;
ssl_certificate {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.pem;
ssl_certificate_key {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.key;
ssl_password_file {{ dashboard_certificate_directory }}/{{ dashboard_hostname }}.password;
include /etc/nginx/ssl-options.conf;
}
server {
if ($host = {{ dashboard_hostname }}) {
return 301 https://$host$request_uri;
}
server_name {{ dashboard_hostname }};
listen 80;
return 404;
}
#
# EOF

View File

@@ -1,15 +0,0 @@
---
dashboard_certificate_directory: /etc/nginx/certs
dashboard_certificate_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
62373265623036656632396637363539313437656433656461356561393538333536303961363462
3964353831633165363430313533623563343732623930630a393030393336613563313431306233
62393235303234336365313138633137663430653061343737616466303136616130643061356566
3165313038393163340a396365643335343332333335363539326635633466313264373639353930
36646462396139346432353233646635303031613639323266366235373132346363653431323666
38336365303431646530613030613437663035613332653865366432636238303437323633666239
64366435353762656362666531393865383639343461616365316634326334623733653664666161
63366234646466326531363666633966326462373562313839393731633931383762306663396562
65663031653661333439373461333234613863623364643464323863656630386561316565353232
35313338373631356231376361346662353365373030653965626434336339613936656138656637
666430306334623563306236616663623438

View File

@@ -1,67 +0,0 @@
---
argument_specs:
main:
description: >-
This role makes several assumptions about the local storage configuration of the server:
1. There is one block device on the server that will be used for data storage
2. That block device will be joined to a glusterfs volume
3. The block device is encrypted with LUKS
This role mostly serves to perform housekeeping tasks and validation of expected configs.
Automating disk configuration seems like a really good way to lose all my data, so I decided
to leave that to the much more reliable manual configuration for the time being.
To that end, here is a quick cheatsheet of commands that might be useful in setting up
storage device(s) for this role (replace `DEVICE` with the block device for storage):
```bash
# Encrypt a block device, provide encryption key when prompted
cryptsetup luksFormat --type luks2 /dev/DEVICE
# Unlock encrypted block device and mount under a mapper
cryptsetup luksOpen /dev/DEVICE LABEL
# Lock an encrypted block device
cryptsetup luksClose LABEL
# Create and format a partition on the encrypted block device
mkfs.xfs /dev/mapper/LABEL -L LABEL
# Run from an existing server already in the gluster pool
# Add server to the gluster pool
gluster peer probe HOSTNAME
# To replace a brick from an already offline'd node, the old brick first needs to be force
# removed, replication reduced, and (if arbiter is enabled) any arbiter nodes removed
#
# Remove arbiter brick
gluster volume remove-brick VOLUME replica 2 HOSTNAME:/EXPORT force
# Remove dead data brick
gluster volume remove-brick VOLUME replica 1 HOSTNAME:/EXPORT force
# Remove dead node
gluster peer detach HOSTNAME
# Add new data brick
gluster volume add-brick VOLUME replica 2 HOSTNAME:/EXPORT
#
# To re-add the arbiter you might need to clean up the `.glusterfs` directory and remove
# the gluster extended attributes from the old brick. These next commands need to be run on
# the host with the arbiter brick physically attached
#
rm -rf /EXPORT/.glusterfs
setfattr -x trusted.gfid /EXPORT
setfattr -x trusted.glusterfs.volume-id /EXPORT
# Re-add arbiter brick
gluster volume add-brick VOLUME replica 3 arbiter 1 HOSTNAME:/EXPORT
# Trigger a resync
gluster volume heal VOLUME
# General gluster debug info
gluster volume info VOLUME
gluster volume status VOLUME
```
options:
skylab_datastore_device:
description: The block device under `/dev/` that should be configured as datastore storage
type: str
required: true
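
For reference, a host would opt into this role by setting that option in its inventory; a minimal sketch, assuming the data disk shows up as `/dev/sdb` (the host and device names here are illustrative, not taken from this repo):

```yaml
# host_vars/example-datastore-host.yaml (hypothetical)
# The role prepends /dev/ itself, so only the bare device name is passed
skylab_datastore_device: sdb
```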

View File

@@ -1,52 +0,0 @@
---
- name: Allow gluster through firewall
become: true
ansible.posix.firewalld:
service: glusterfs
state: enabled
zone: trusted
immediate: true
permanent: true
- name: Create datastore directory
become: true
ansible.builtin.file:
path: /mnt/brick/datastore
state: directory
- name: Start and disable glusterd
become: true
ansible.builtin.systemd:
name: glusterd
state: started
enabled: false
- name: Fetch peer status
become: true
ansible.builtin.command:
cmd: gluster peer status
changed_when: false
register: _gluster_peer_status_raw
- name: Check peer status
ansible.builtin.assert:
that:
- not _gluster_peer_status_raw.stdout_lines[0].strip().endswith('0')
fail_msg: >-
ERROR: Datastore host '{{ inventory_hostname }}' is not joined to the gluster pool. Run the
command 'gluster peer probe {{ inventory_hostname }}.local' from another datastore host to
add it.
success_msg: >-
Datastore host {{ inventory_hostname }} is joined to the gluster pool
- name: Mount gluster volume
become: true
ansible.posix.mount:
path: /mnt/datastore
src: localhost:/datastore
state: mounted
fstype: glusterfs
# Note that this just needs to be any path *other* than the actual
# fstab. This is done just to prevent the devices from being
# automatically mounted at boot
fstab: "{{ skylab_state_dir }}/mounts"

View File

@@ -1,9 +0,0 @@
---
- name: Install datastore packages
ansible.builtin.import_tasks: packages.yaml
- name: Configure mounting
ansible.builtin.import_tasks: mounts.yaml
- name: Configure glusterfs
ansible.builtin.import_tasks: gluster.yaml

View File

@@ -1,109 +0,0 @@
---
- name: Create mount points
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
owner: root
group: "{{ ansible_user }}"
loop:
- /mnt/datastore
- /mnt/brick
- name: Determine current mounts
vars:
_current_mounts: []
ansible.builtin.set_fact:
_current_mounts: "{{ _current_mounts + [item.mount] }}"
loop: "{{ ansible_mounts }}"
loop_control:
label: "{{ item.mount }}"
- name: Ensure mount points are empty when unmounted
when: item not in _current_mounts
ansible.builtin.command:
cmd: "/usr/bin/ls {{ item }}"
changed_when: false
failed_when: _mountpoint_ls_raw.stdout
register: _mountpoint_ls_raw
loop:
- /mnt/datastore
- /mnt/brick
- name: Fetch block device information
ansible.builtin.command:
cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
changed_when: false
register: _lsblk_info_raw
- name: Process block device information
ansible.builtin.set_fact:
_datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"
- name: Check state of the datastore device
ansible.builtin.assert:
that: _datastore_device_info.fstype == "crypto_LUKS"
fail_msg: >-
ERROR: Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }}
must be LUKS encrypted
success_msg: >-
Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is
LUKS encrypted
- name: Determine whether datastore block is decrypted
ansible.builtin.set_fact:
_datastore_device_is_decrypted: "{{ _datastore_device_info.children is defined }}"
- name: Decrypt datastore block
when: not _datastore_device_is_decrypted
block:
- name: Prompt for decryption key
no_log: true
when: skylab_datastore_encryption_password is not defined
ansible.builtin.pause:
prompt: >-
Datastore device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is not
decrypted. Enter decryption passphrase to continue GlusterFS brick configuration
echo: false
register: _luks_decryption_key
- name: Open LUKS device
become: true
community.crypto.luks_device:
device: /dev/{{ skylab_datastore_device }}
state: opened
name: brick
passphrase: "{{ _luks_decryption_key.user_input | default(skylab_datastore_encryption_password) }}"
- name: Fetch updated block device information
ansible.builtin.command:
cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
changed_when: false
register: _lsblk_info_raw
- name: Process updated block device information
ansible.builtin.set_fact:
_datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"
- name: Create dummy fstab
ansible.builtin.file:
state: touch
path: "{{ skylab_state_dir }}/mounts"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0644
access_time: preserve
modification_time: preserve
- name: Mount datastore block
become: true
ansible.posix.mount:
path: /mnt/brick
src: UUID={{ _datastore_device_info.children[0].uuid }}
state: mounted
fstype: "{{ _datastore_device_info.children[0].fstype }}"
# Note that this just needs to be any path *other* than the actual
# fstab. This is done just to prevent the devices from being
# automatically mounted at boot
fstab: "{{ skylab_state_dir }}/mounts"
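
The interactive pause above only fires when `skylab_datastore_encryption_password` is undefined; for unattended runs the passphrase could be supplied up front instead, e.g. from vaulted group vars. A minimal sketch (the file location and vault contents are illustrative, not taken from this repo):

```yaml
# group_vars/datastore.yaml (hypothetical) - lets the LUKS open step run without prompting
skylab_datastore_encryption_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  ...encrypted passphrase...
```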

View File

@@ -1,31 +0,0 @@
---
- name: Install gluster repository
become: true
ansible.builtin.dnf:
name: centos-release-gluster9
state: present
register: _datastore_repo_gluster
- name: Enable required repositories
become: true
ansible.builtin.lineinfile:
path: /etc/yum.repos.d/{{ item }}.repo
line: enabled=1
state: present
regexp: "#?enabled=(0|1)"
loop:
- Rocky-AppStream
- Rocky-PowerTools
register: _datastore_repo_powertools
- name: Install datastore packages
become: true
when: ansible_distribution == "Rocky"
ansible.builtin.dnf:
state: present
update_cache: "{{ _datastore_repo_powertools.changed or _datastore_repo_gluster.changed }}"
name:
- cryptsetup-luks
- glusterfs
- glusterfs-fuse
- glusterfs-server

View File

@@ -1,20 +0,0 @@
---
- name: Enable systemd-firewalld
become: true
ansible.builtin.systemd:
name: firewalld
state: started
enabled: true
- name: Configure firewall interface zones
become: true
when: item.value.firewall is defined
ansible.posix.firewalld:
interface: "{{ item.key }}"
zone: "{{ item.value.firewall }}"
state: enabled
permanent: true
immediate: true
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"

View File

@@ -1,32 +0,0 @@
---
- name: Retrieve current hostsfile contents
ansible.builtin.command:
cmd: cat /etc/hosts
changed_when: false
register: _existing_hostsfile_raw
- name: Assemble hostsfile lines
vars:
_hostsfile_lines: []
ansible.builtin.set_fact:
_hostsfile_lines: "{{ _hostsfile_lines + [hostvars[item].skylab_cluster.address.internal | ansible.netcommon.ipaddr('address') + ' ' + item + '.local ' + hostvars[item].skylab_legacy_names | default([]) | join(' ')] }}"
loop: "{{ groups.cluster }}"
- name: Configure local hostsfile
become: true
ansible.builtin.lineinfile:
path: /etc/hosts
line: "{{ item }}"
state: present
loop: "{{ _hostsfile_lines }}"
loop_control:
label: "{{ item.partition(' ')[0] }}"
- name: Remove unmanaged hostsfile entries
become: true
when: "'localhost' not in item and item not in _hostsfile_lines"
ansible.builtin.lineinfile:
path: /etc/hosts
line: "{{ item }}"
state: absent
loop: "{{ _existing_hostsfile_raw.stdout_lines }}"

View File

@@ -1,24 +0,0 @@
---
- name: Configure sudoers file
ansible.builtin.import_tasks: sudoers.yaml
- name: Configure SSH server
ansible.builtin.import_tasks: sshd.yaml
- name: Configure network settings
when: skylab_networking is defined
ansible.builtin.include_tasks: networkd.yaml
- name: Configure firewall settings
when: skylab_networking is defined
ansible.builtin.include_tasks: firewalld.yaml
- name: Configure hostsfile
when: "inventory_hostname in groups.cluster"
ansible.builtin.import_tasks: hosts.yaml
- name: Enable tmpfs mount
become: true
ansible.builtin.systemd:
name: tmp.mount
enabled: true

View File

@@ -1,97 +0,0 @@
---
- name: Configure network settings
become: true
block:
- name: Install systemd-networkd on Rocky
ansible.builtin.dnf:
name: systemd-networkd
state: present
- name: Ensure network config directory exists
ansible.builtin.file:
path: /etc/systemd/network
state: directory
owner: root
group: root
mode: 0755
- name: Create network files
ansible.builtin.template:
src: network.j2
dest: /etc/systemd/network/{{ item.key }}.network
mode: 0644
owner: root
group: "{{ ansible_user }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create netdev files
when: item.value.device is defined
ansible.builtin.template:
src: netdev.j2
dest: /etc/systemd/network/{{ item.key }}.netdev
mode: 0644
owner: root
group: "{{ ansible_user }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Fetch existing network config directory contents
changed_when: false
ansible.builtin.command:
cmd: /usr/bin/ls /etc/systemd/network
register: _network_config_dir_raw
- name: Remove legacy network config files
when: item.strip().replace('.netdev', '').replace('.network', '') not in skylab_networking
ansible.builtin.file:
path: /etc/systemd/network/{{ item }}
state: absent
loop: "{{ _network_config_dir_raw.stdout_lines }}"
loop_control:
label: "{{ item.strip() }}"
- name: Configure fallback DNS
ansible.builtin.lineinfile:
path: /etc/systemd/resolved.conf
create: false
line: FallbackDNS=
- name: Enable systemd-networkd
ansible.builtin.systemd:
name: "{{ item }}"
enabled: true
loop:
- systemd-networkd
- systemd-networkd-wait-online
- systemd-resolved
- name: Disable NetworkManager
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
loop:
- NetworkManager
- NetworkManager-wait-online
- name: Start systemd-resolved to enable symlink creation
ansible.builtin.systemd:
name: systemd-resolved
state: started
- name: Link system resolv.conf to systemd-resolved
ansible.builtin.file:
dest: /etc/resolv.conf
src: /run/systemd/resolve/resolv.conf
state: link
force: true
setype: net_conf_t
- name: Link systemd-resolved to multi-user target
ansible.builtin.file:
dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
src: /usr/lib/systemd/system/systemd-resolved.service
state: link
force: true

View File

@@ -1,30 +0,0 @@
---
- name: Disable sudo password for WHEEL group
when: ansible_distribution == "Rocky" or ansible_distribution == "CentOS"
become: true
ansible.builtin.copy:
content: "%wheel ALL=(ALL) NOPASSWD: ALL"
dest: /etc/sudoers.d/30-wheel
owner: root
group: "{{ ansible_user }}"
mode: 0644
# Note that the cleanup tasks need to be after the new installation tasks
# since one or more files being cleaned up might be being relied on to
# allow ansible access
- name: Fetch content of sudoers config directory
become: true
changed_when: false
ansible.builtin.command:
cmd: /usr/bin/ls /etc/sudoers.d/
register: _sudoers_files_raw
- name: Remove legacy sudoers config files
when: item.strip() not in ["30-wheel"]
become: true
ansible.builtin.file:
path: /etc/sudoers.d/{{ item.strip() }}
state: absent
loop: "{{ _sudoers_files_raw.stdout_lines }}"
loop_control:
label: "/etc/sudoers.d/{{ item.strip() }}"

View File

@@ -1,11 +0,0 @@
{# figlet-style "SKYLAB" ASCII-art banner #}
✨ {{ skylab_description }} @{{ skylab_location }}
{{ ' ' }}

View File

@@ -1,18 +0,0 @@
# ANSIBLE MANAGED FILE - DO NOT MANUALLY EDIT
#
[NetDev]
Name={{ item.key }}
Kind={{ item.value.device }}
{% if item.value.device.lower() == 'bond' %}
[Bond]
Mode={{ item.value.bond_mode | default('balance-rr') }}
PrimaryReselectPolicy=always
MIIMonitorSec=1s
{% endif %}
{% if item.value.device.lower() == 'vlan' %}
[VLAN]
Id={{ item.key.partition('.')[2] }}
{% endif %}
# EOF

View File

@@ -1,32 +0,0 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
#
[Match]
Name={{ item.key }}
[Network]
DHCP={{ "Yes" if item.value.dhcp | default(false) else "No" }}
IPv6AcceptRA=No
{% if item.value.dns is defined %}
{% for server in item.value.dns %}
DNS={{ server }}
{% endfor %}
{% endif %}
{% if item.value.bond is defined %}
Bond={{ item.value.bond }}
{% endif %}
{% if not item.value.dhcp | default(false) %}
{% if item.value.gateway is defined %}
Gateway={{ item.value.gateway | ansible.netcommon.ipaddr('address') }}
{% endif %}
{% for address in item.value.addresses | default([]) %}
Address={{ address | ansible.netcommon.ipaddr('host/prefix') }}
{% endfor %}
{% endif %}
{% for interface in skylab_networking.keys() %}
{% if interface.startswith(item.key) and interface.partition('.')[2] | regex_search('^[0-9]{1,4}$') and interface != item.key %}
VLAN={{ interface }}
{% endif %}
{% endfor %}
# EOF

View File

@@ -1,69 +0,0 @@
---
- name: Check cluster swarm status
run_once: true
block:
- name: Fetch cluster server swarm info
delegate_to: "{{ item }}"
ansible.builtin.command:
cmd: !unsafe docker info --format '{{json .Swarm}}'
changed_when: false
register: _docker_cluster_swarm_state_raw
loop: "{{ groups.cluster }}"
- name: Process cluster server swarm info
vars:
_docker_cluster_swarm_state: {}
ansible.builtin.set_fact:
_docker_cluster_swarm_state: "{{ _docker_cluster_swarm_state | combine({item.item: (item.stdout | from_json)}) }}"
loop: "{{ _docker_cluster_swarm_state_raw.results }}"
loop_control:
label: "{{ item.item }}"
- name: Identify swarm managers
vars:
_docker_cluster_swarm_managers: []
when: item.value.LocalNodeState == 'active' and item.value.ControlAvailable
ansible.builtin.set_fact:
_docker_cluster_swarm_managers: "{{ _docker_cluster_swarm_managers + [item.key] }}"
loop: "{{ _docker_cluster_swarm_state | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Check that swarm managers were discovered
ansible.builtin.assert:
that:
- _docker_cluster_swarm_managers
fail_msg: >-
ERROR: None of the member cluster servers ({{ groups.cluster | join(', ') }}) is joined to
a docker swarm as a manager. Please join at least one cluster server to a swarm and
promote it to swarm manager
success_msg: >-
Identified {{ _docker_cluster_swarm_managers | count }} swarm managers
({{ _docker_cluster_swarm_managers | join(', ') }})
- name: Determine swarm manager cluster IDs
vars:
_docker_cluster_swarm_manager_cluster_ids: []
ansible.builtin.set_fact:
_docker_cluster_swarm_manager_cluster_ids: "{{ _docker_cluster_swarm_manager_cluster_ids + [_docker_cluster_swarm_state[item].Cluster.ID] }}"
loop: "{{ _docker_cluster_swarm_managers }}"
- name: Check swarm managers are part of the same swarm
ansible.builtin.assert:
that:
- _docker_cluster_swarm_manager_cluster_ids | unique | count == 1
fail_msg: >-
ERROR: Swarm managers ({{ _docker_cluster_swarm_managers | join(', ') }}) appear to be
joined to different swarms
(IDs {{ _docker_cluster_swarm_manager_cluster_ids | join(', ') }})
success_msg: >-
Swarm managers are joined to swarm with ID
{{ _docker_cluster_swarm_manager_cluster_ids[0] }}
- name: Determine swarm manager to use for host configuration
ansible.builtin.set_fact:
_docker_swarm_manager: "{{ _docker_cluster_swarm_managers[0] }}"
- name: Determine whether host needs to be added to the swarm
ansible.builtin.set_fact:
_docker_swarm_needs_join: "{{ not _docker_cluster_swarm_state[inventory_hostname].Cluster.ID | default('') == _docker_cluster_swarm_manager_cluster_ids[0] }}"

View File

@@ -1,53 +0,0 @@
---
- name: Determine docker daemon DNS servers
vars:
_docker_daemon_dns: []
ansible.builtin.set_fact:
_docker_daemon_dns: "{{ _docker_daemon_dns + (item.value.dns | default([])) }}"
loop: "{{ skylab_networking | dict2items }}"
loop_control:
label: "{{ item.key }}"
- name: Create docker config directory
become: true
ansible.builtin.file:
path: /etc/docker
state: directory
owner: "{{ ansible_user }}"
group: docker
mode: 0750
- name: Configure docker daemon
become: true
ansible.builtin.template:
src: daemon.json.j2
dest: /etc/docker/daemon.json
mode: 0640
owner: "{{ ansible_user }}"
group: docker
- name: Start and enable docker service
become: true
ansible.builtin.systemd:
name: docker
state: started
enabled: true
- name: Include access variables
ansible.builtin.include_vars:
file: vars/access.yaml
- name: Add administrators to docker group
become: true
when: item.admin | default(false) and 'cluster' in (item.targets | default([]))
ansible.builtin.user:
name: "{{ item.name }}"
group: "{{ item.name }}"
groups: docker
append: true
loop: "{{ skylab_accounts }}"
loop_control:
label: "{{ item.name }},{{ item.uid }}"
- name: Reset connection to get new group membership
ansible.builtin.meta: reset_connection

View File

@@ -1,61 +0,0 @@
---
- name: Fetch glusterfs plugin state
block:
- name: Fetch glusterfs storage plugin state
ansible.builtin.command:
cmd: docker plugin inspect glusterfs
changed_when: false
register: _docker_glusterfs_storage_plugin_raw
rescue:
- name: Install glusterfs storage plugin
ansible.builtin.command:
cmd: docker plugin install --alias glusterfs mochoa/glusterfs-volume-plugin --grant-all-permissions --disable
changed_when: true
- name: Fetch glusterfs storage plugin state
ansible.builtin.command:
cmd: docker plugin inspect glusterfs
changed_when: false
register: _docker_glusterfs_storage_plugin_raw
- name: Process glusterfs storage plugin config
ansible.builtin.set_fact:
_docker_glusterfs_storage_plugin: "{{ (_docker_glusterfs_storage_plugin_raw.stdout | from_json)[0] }}"
# Note that this might not end up being defined if the plugin has not been configured
- name: Identify plugin server settings
when: item.startswith('SERVERS')
ansible.builtin.set_fact:
_docker_glusterfs_existing_setting: "{{ item }}"
loop: "{{ _docker_glusterfs_storage_plugin.Settings.Env }}"
- name: Determine gluster servers
run_once: true
vars:
_docker_glusterfs_hostnames: []
ansible.builtin.set_fact:
_docker_glusterfs_hostnames: "{{ _docker_glusterfs_hostnames + [item + '.local'] }}"
loop: "{{ groups.cluster }}"
- name: Determine gluster plugin setting
ansible.builtin.set_fact:
_docker_glusterfs_setting: "SERVERS={{ _docker_glusterfs_hostnames | join(',') }}"
- name: Configure plugin
when: _docker_glusterfs_setting != _docker_glusterfs_existing_setting | default('')
block:
- name: Disable plugin
when: _docker_glusterfs_storage_plugin.Enabled
ansible.builtin.command:
cmd: docker plugin disable glusterfs
- name: Set plugin servers setting
changed_when: true
ansible.builtin.command:
cmd: docker plugin set glusterfs {{ _docker_glusterfs_setting }}
register: _docker_glusterfs_set_setting
- name: Enable plugin
when: not _docker_glusterfs_storage_plugin.Enabled or _docker_glusterfs_set_setting.changed | default(false)
ansible.builtin.command:
cmd: docker plugin enable glusterfs

View File

@@ -1,26 +0,0 @@
---
- name: Install Docker repository
become: true
ansible.builtin.get_url:
url: https://download.docker.com/linux/centos/docker-ce.repo
dest: /etc/yum.repos.d/docker-ce.repo
owner: root
group: "{{ ansible_user }}"
mode: 0644
register: _docker_repo_status
- name: Install docker repository GPG key
become: true
ansible.builtin.rpm_key:
key: https://download.docker.com/linux/centos/gpg
state: present
- name: Install Docker
become: true
ansible.builtin.dnf:
state: present
name:
- docker-ce
- docker-ce-cli
- containerd.io
update_cache: "{{ _docker_repo_status.changed }}"

View File

@@ -1,48 +0,0 @@
---
- name: Fetch join token from existing manager
delegate_to: "{{ _docker_swarm_manager }}"
changed_when: false
ansible.builtin.command:
cmd: docker swarm join-token manager --quiet
register: _docker_swarm_join_token
- name: Fetch manager addresses from existing manager
delegate_to: "{{ _docker_swarm_manager }}"
changed_when: false
ansible.builtin.command:
cmd: !unsafe docker info --format '{{json .Swarm.RemoteManagers}}'
register: _docker_swarm_manager_info_raw
- name: Process manager addresses
vars:
_docker_swarm_manager_addresses: []
ansible.builtin.set_fact:
_docker_swarm_manager_addresses: "{{ _docker_swarm_manager_addresses + [item.Addr] }}"
loop: "{{ _docker_swarm_manager_info_raw.stdout | from_json }}"
- name: Join node to swarm
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_swarm:
state: join
advertise_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
listen_addr: "{{ lookup('vars', 'ansible_' + skylab_cluster.interface.internal).ipv4.address }}"
remote_addrs: "{{ _docker_swarm_manager_addresses }}"
join_token: "{{ _docker_swarm_join_token.stdout.strip() }}"
timeout: 1200
- name: Fetch node swarm ID
ansible.builtin.command:
cmd: !unsafe docker info --format '{{ .Swarm.NodeID}}'
changed_when: false
register: _docker_node_id_raw
# For newly added nodes we don't want services to be scheduled automatically until the
# configuration is complete. The node-up playbook is responsible for updating the node to
# make it available in the cluster again
- name: Update node to drain
vars:
ansible_python_interpreter: "{{ skylab_ansible_venv }}/bin/python"
community.docker.docker_node:
availability: drain
hostname: "{{ _docker_node_id_raw.stdout.strip() }}"

View File

@@ -1,21 +0,0 @@
---
- name: Install Docker
ansible.builtin.import_tasks: install.yaml
- name: Configure Docker
ansible.builtin.import_tasks: configure.yaml
# This taskfile will set two facts that will be used in subsequent tasks:
# * _docker_swarm_needs_join: a boolean indicating whether the host needs to be joined to the swarm
# or is already joined
# * _docker_swarm_manager: the inventory hostname of a swarm manager that can be delegated to to
# fetch swarm joining info
- name: Check swarm state ahead of swarm configuration
ansible.builtin.import_tasks: check.yaml
- name: Join server to swarm
when: _docker_swarm_needs_join
ansible.builtin.include_tasks: join.yaml
- name: Configure gluster storage driver
ansible.builtin.import_tasks: gluster.yaml

View File

@@ -1,7 +0,0 @@
{
"dns": [
{% for dns_server in _docker_daemon_dns %}
"{{ dns_server }}"{{ ',' if not loop.last else '' }}
{% endfor %}
]
}

View File

@@ -1,2 +0,0 @@
[org/gnome/login-screen]
disable-user-list=true

View File

@@ -1,2 +0,0 @@
[org/gnome/mutter]
experimental-features=['scale-monitor-framebuffer']

View File

@@ -1,47 +0,0 @@
if [ -f "$(which powerline-daemon)" ]; then
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
. /usr/share/powerline/bash/powerline.sh
fi
export NVM_DIR="$HOME/.nvm"
export PROJECTS_DIR="$HOME/projects"
function gg() {
cd "$PROJECTS_DIR/$1";
if [ -f "$PROJECTS_DIR/$1/ansible.cfg" ]; then
ANSIBLE_CONFIG="$PROJECTS_DIR/$1/ansible.cfg" ANSIBLE_COLLECTIONS_PATH="$PROJECTS_DIR/$1/.ansible" poetry shell;
elif [ -f "$PROJECTS_DIR/$1/pyproject.toml" ]; then
poetry shell;
fi
}
mpw() {
_copy() {
if hash pbcopy 2>/dev/null; then
pbcopy
elif hash xclip 2>/dev/null; then
xclip -selection clip
else
cat; echo 2>/dev/null
return
fi
echo >&2 "Copied!"
}
# Empty the clipboard
:| _copy 2>/dev/null
# Ask for the user's name and password if not yet known.
MPW_FULLNAME="Ethan Paul"
# Start Master Password and copy the output.
printf %s "$(MPW_FULLNAME=$MPW_FULLNAME command mpw "$@")" | _copy
}
alias explorer='nautilus'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias prun="poetry run"
alias psync="poetry install --remove-untracked"

View File

@@ -1,3 +0,0 @@
user-db:user
system-db:gdm
file-db:/usr/share/gdm/greeter-dconf-defaults

View File

@@ -1,2 +0,0 @@
user-db:user
system-db:local

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 664 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 243 KiB

View File

@@ -1,6 +0,0 @@
---
- name: dconf-update
become: true
changed_when: true
ansible.builtin.command:
cmd: dconf update

View File

@@ -1,144 +0,0 @@
---
- name: Install user bashrc
become: true
ansible.builtin.copy:
src: bashrc.sh
dest: ~{{ item }}/.bashrc_ansible
owner: "{{ ansible_user }}"
group: "{{ item }}"
mode: 0644
loop: "{{ _local_human_users }}"
- name: Configure user bashrc loading
become: true
ansible.builtin.lineinfile:
path: ~{{ item }}/.bashrc
line: source ~/.bashrc_ansible
state: present
loop: "{{ _local_human_users }}"
- name: Configure local bash completions loading
become: true
ansible.builtin.lineinfile:
path: ~{{ item }}/.bashrc
line: source ~/.config/bash_completions
state: present
loop: "{{ _local_human_users }}"
- name: Configure bash completions
become: true
ansible.builtin.blockinfile:
path: ~{{ item }}/.config/bash_completions
create: true
block: >-
function _gg_completion() {
local cur=${COMP_WORDS[COMP_CWORD]};
COMPREPLY=( $(compgen -W "$(command ls $PROJECTS_DIR)" -- $cur) );
}
complete -F _gg_completion gg
owner: "{{ ansible_user }}"
group: "{{ item }}"
mode: 0664
loop: "{{ _local_human_users }}"
- name: Enforce ownership of the SSH keys
become: true
ansible.builtin.file:
path: ~{{ item.0 }}/.ssh/id_ed25519{{ item.1 }}
state: file
owner: "{{ item.0 }}"
group: "{{ item.0 }}"
loop: "{{ _local_human_users | product(['', '.pub']) }}"
- name: Configure dconf setting
become: true
block:
- name: Create dconf config directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: root
group: "{{ ansible_user }}"
mode: 0755
loop:
- /etc/dconf/profile
- /etc/dconf/db/gdm.d
- name: Create global dconf config
ansible.builtin.copy:
src: gdm-system
dest: /etc/dconf/profile/gdm
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Create user dconf config
ansible.builtin.copy:
src: gdm-user
dest: /etc/dconf/profile/user
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Disable user list
ansible.builtin.copy:
src: 00-disable-user-list
dest: /etc/dconf/db/gdm.d/00-disable-user-list
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Enable fractional scaling
ansible.builtin.copy:
src: 00-enable-fractional-scaling
dest: /etc/dconf/db/local.d/00-enable-fractional-scaling
owner: root
group: "{{ ansible_user }}"
mode: 0644
notify:
- dconf-update
- name: Install themes
become: true
block:
- name: Create local themes directory
ansible.builtin.file:
path: ~{{ item }}/.themes
state: directory
owner: "{{ item }}"
group: "{{ item }}"
mode: 0750
loop: "{{ _local_human_users }}"
- name: Unarchive LightningBug into local directory
ansible.builtin.unarchive:
src: lightningbug-dark.tar.gz
dest: ~{{ item }}/.themes
owner: "{{ item }}"
group: "{{ item }}"
loop: "{{ _local_human_users }}"
- name: Install wallpaper
become: true
ansible.builtin.copy:
src: wallpaper-{{ inventory_hostname }}.jpg
dest: ~{{ item }}/Pictures/wallpaper.jpg
owner: "{{ item }}"
group: "{{ item }}"
loop: "{{ _local_human_users }}"
- name: Link external media directory
become: true
ansible.builtin.file:
path: ~{{ item }}/Drives
src: /run/media/{{ item }}
state: link
force: true
loop: "{{ _local_human_users }}"

View File

@@ -1,59 +0,0 @@
---
- name: Check for MPW binary
ansible.builtin.stat:
path: /usr/local/bin/mpw
register: _mpw_binary_stat
- name: Install MPW
when: (not _mpw_binary_stat.stat.exists) or (force_reinstall | default(false))
block:
- name: Install build dependencies on Fedora
when: ansible_distribution == "Fedora"
become: true
ansible.builtin.dnf:
name:
- libsodium-devel
state: present
- name: Create temporary build directory
ansible.builtin.tempfile:
prefix: ansible.build.mpw
state: directory
register: _mpw_build_dir
- name: Download MPW source
ansible.builtin.git:
repo: https://gitlab.com/MasterPassword/MasterPassword.git
version: 344771db
recursive: false # does *not* clone submodules
dest: "{{ _mpw_build_dir.path }}"
# God I hate this
- name: Patch .gitmodules to use HTTPS
ansible.builtin.replace:
path: "{{ _mpw_build_dir.path }}/.gitmodules"
regexp: "url = git://"
replace: "url = https://"
- name: Initialize submodules
ansible.builtin.command:
cmd: git submodule update --init
chdir: "{{ _mpw_build_dir.path }}"
- name: Build MasterPassword binary
ansible.builtin.command:
cmd: bash build
chdir: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/"
- name: Copy binary to system path
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _mpw_build_dir.path }}/platform-independent/cli-c/mpw"
dest: "/usr/local/bin"
mode: 0755
always:
- name: Remove temporary directory
ansible.builtin.file:
path: "{{ _mpw_build_dir.path }}"
state: absent

View File

@@ -1,79 +0,0 @@
---
- name: Check whether binary exists
become: true
ansible.builtin.stat:
path: "~{{ local_username }}/.local/bin/MultiMC"
register: _multimc_stat
- name: Install MultiMC
when: (not _multimc_stat.stat.exists) or (force_reinstall | default(false))
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
register: _multimc_tempdir
- name: Download and unpack distribution archive
ansible.builtin.unarchive:
src: https://files.multimc.org/downloads/mmc-stable-lin64.tar.gz
remote_src: true
dest: "{{ _multimc_tempdir.path }}"
- name: Ensure ~/.local/share/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/share
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Ensure ~/.local/bin/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/bin
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Copy MMC distribution to ~/.local/share/
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _multimc_tempdir.path }}/MultiMC/"
dest: "~{{ local_username }}/.local/share/multimc"
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Link MMC binary into ~/.local/bin/
become: true
ansible.builtin.file:
state: link
src: ~{{ local_username }}/.local/share/multimc/MultiMC
path: ~{{ local_username }}/.local/bin/MultiMC
- name: Copy application icon
become: true
ansible.builtin.copy:
src: multimc.png
dest: ~{{ local_username }}/.local/share/icons/multimc.png
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0755
- name: Template application desktop entry
become: true
ansible.builtin.template:
src: multimc.desktop.j2
dest: ~{{ local_username }}/.local/share/applications/multimc.desktop
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0755
always:
- name: Delete temp dir
ansible.builtin.file:
path: "{{ _multimc_tempdir.path }}"
state: absent

View File

@@ -1,27 +0,0 @@
---
- name: Create install directory
become: true
ansible.builtin.file:
path: /opt/pipx
state: directory
owner: "{{ ansible_user }}"
group: "{{ skylab_group_admin.name }}"
mode: 0755
- name: Create install venv
ansible.builtin.command:
cmd: python3 -m venv /opt/pipx
creates: /opt/pipx/bin/python
- name: Install pipx
ansible.builtin.pip:
name:
- pipx
executable: /opt/pipx/bin/pip
- name: Link pipx binary into system path
become: true
ansible.builtin.file:
state: link
src: /opt/pipx/bin/pipx
path: /usr/local/bin/pipx

View File

@@ -1,53 +0,0 @@
---
- name: Check whether Tor Browser is already installed
become: true
ansible.builtin.stat:
path: "~{{ local_username }}/.local/share/tor-browser/start-tor-browser.desktop"
register: _torbrowser_stat
- name: Install Tor Browser
when: not _torbrowser_stat.stat.exists
block:
- name: Create temp dir
ansible.builtin.tempfile:
state: directory
register: _torbrowser_tempdir
- name: Download and unpack distribution archive
ansible.builtin.unarchive:
src: https://dist.torproject.org/torbrowser/11.0.10/tor-browser-linux64-11.0.10_en-US.tar.xz
remote_src: true
dest: "{{ _torbrowser_tempdir.path }}"
- name: Ensure ~/.local/share/ exists
become: true
ansible.builtin.file:
path: ~{{ local_username }}/.local/share
state: directory
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Copy Tor Browser distribution to ~/.local/share/
become: true
ansible.builtin.copy:
remote_src: true
src: "{{ _torbrowser_tempdir.path }}/tor-browser_en-US/"
dest: "~{{ local_username }}/.local/share/tor-browser"
owner: "{{ local_username }}"
group: "{{ local_username }}"
mode: 0700
- name: Register application
become: true
become_user: "{{ local_username }}"
changed_when: true
ansible.builtin.command:
cmd: ./start-tor-browser.desktop
chdir: ~{{ local_username }}/.local/share/tor-browser
always:
- name: Delete temp dir
ansible.builtin.file:
path: "{{ _torbrowser_tempdir.path }}"
state: absent

Some files were not shown because too many files have changed in this diff