Compare commits: 38ce173ad5...devel (63 commits)
| SHA1 |
|---|
| 6583c1ef15 |
| 1490774f4a |
| a7012abf28 |
| 9ab3a40364 |
| 746399c1de |
| eb9d35ee56 |
| 1f9c4df494 |
| bb4fb4c48f |
| 0581239ae6 |
| 52d2e7fcb5 |
| 4edb4d0400 |
| 9c6a8ec9eb |
| 083a5ad1e9 |
| 27aba94a92 |
| ac850f8966 |
| ed8a2f822a |
| 94e56ef57c |
| 68edbd6451 |
| cf3d842e1d |
| 6309a62b79 |
| cda80a5487 |
| f0783701b2 |
| ec023ca375 |
| a8cf68f70e |
| f0d5169e9e |
| a629cb0286 |
| e445708ed4 |
| b02e6a2791 |
| 96ed5e47be |
| ac09d79fa9 |
| d0ae93751a |
| cd3817dfb6 |
| 567e99ee0c |
| 3a56d20104 |
| 646416dbf7 |
| 7ed9f5c2a0 |
| 39603cbb9b |
| 6c2301d7cc |
| ab52415f54 |
| e333809b4a |
| 7ac5a81774 |
| b07650cc79 |
| bc6d971aef |
| 8bfb365dfd |
| b0612af979 |
| c20af0bdf7 |
| bec4d2cac5 |
| e7ffb1b56c |
| e1aefa2527 |
| 800c7f062c |
| 43370eb837 |
| fd056ba0fa |
| e0fc6ab1fa |
| 0016b318e2 |
| 1990413fbe |
| ae0be16dd6 |
| 1e1d4d75a0 |
| 5a48dc5a61 |
| 0a7b67b6c5 |
| 39b2e4676e |
| 2bda08fd2f |
| f1639dce1e |
| 5df550669a |
.pre-commit-config.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
# All of the pre-commit hooks here actually use the `python` pre-commit language
# setting. However, for the python language setting, pre-commit will create and manage
# a cached virtual environment for each hook ID and do a bare `pip install <repo>` into
# the venv to set up the hook. This can result in conflicting dependency versions between
# the version installed to the pre-commit venv and the version installed to the Poetry
# venv specified in the lockfile.
#
# The solution is to specify `language: system` for all hooks and then install the
# required dependencies to the Poetry venv. The `system` language skips the isolated
# venv creation and looks for the entrypoint specified by the hook in the global
# environment which, if running in the Poetry venv, will find the entrypoint provided
# by the Poetry-managed dependency.
#
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.3.0
    hooks:
      - id: end-of-file-fixer
        language: system
      - id: fix-encoding-pragma
        args:
          - "--remove"
        language: system
      - id: trailing-whitespace
        language: system
      - id: check-merge-conflict
        language: system
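A local hook for a Poetry-managed tool would follow the same pattern. The sketch below is hypothetical (neither the `black` hook nor the dev dependency it assumes is part of this change): with `language: system`, pre-commit resolves the `entry` executable from the active environment, so running hooks inside the Poetry venv picks up the locked version.

```yaml
# Hypothetical local hook following the `language: system` scheme above;
# assumes `black` is installed into the Poetry venv as a dev dependency
- repo: local
  hooks:
    - id: black
      name: black
      entry: black
      language: system
      types: [python]
```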
README.md (39 lines)
@@ -3,6 +3,39 @@

Network Ansible configurations

* The `omni_*` prefix is used for custom variables defined and used internally to
  distinguish them from `ansible_*` or other variables
* Roles: things machines do. Tasks: how those things are done. Platform compatibility
  should be handled in tasks. Config logic should be handled in roles.
  distinguish them from `ansible_*` or other variables. The `_runtime_` prefix should
  be used for runtime variables
* Passing `clean=true` should force cleaning any and all cached stuff
* Passing `update=true` should update any unpinned _things_ to their latest version

Organizational notes:

* Playbooks should be platform/device agnostic. Any playbook should be runnable against
  any device. If the config a playbook deploys isn't applicable to that device then the
  playbook should be laid out so that it skips any inapplicable hosts.
* Building from that, platform-conditionals should go in task files: `when` conditions
  in playbooks should be limited to runtime conditions.

Target notes:

* The `'mgmt'` target grants remote management access. This usually means SSH + local
  login access, but can also mean web interface (cockpit, erx, etc)

General workflow:

1. Run `provision.yml` - this gets the entire environment into a ready-to-go state but
   does not deploy any actual applications or perform any target tasks
2. Run one or more `deploy-*.yml` - this deploys the application noted to the system
3. Run one or more `do-*.yml` - this performs one-off tasks

## local env creation

Requires Poetry-1.1+

```bash
git clone https://vcs.enp.one/omni/omni-ansible.git

cd omni-ansible/

poetry install
```

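As a rough sketch of the conventions the README excerpt above describes (names and values here are hypothetical, not taken from this change): `omni_*` marks internally-defined variables, `_runtime_*` marks values computed during a run, and the `clean`/`update` flags gate cache-clearing and upgrade behavior.

```yaml
# Hypothetical tasks illustrating the README conventions above
- name: Clean cached artifacts
  when: clean | default(false) | bool         # `clean=true` forces cache cleanup
  file:
    path: "{{ omni_example_cache_dir }}"      # `omni_*`: internally-defined variable
    state: absent
  vars:
    omni_example_cache_dir: /var/cache/omni   # hypothetical path

- name: Record a run-scoped value
  set_fact:
    _runtime_example: "{{ update | default(false) }}"   # `_runtime_`: runtime variable
```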
@@ -1,8 +0,0 @@
---
ansible_user: ansible

protected_users:
  - root
  - ansible

domain: net.enp.one
@@ -1,12 +0,0 @@
---
enable_gui: False

enable_ssh: True

enable_ssh_password_auth: False

disable_sudo_password: True

enable_networkd: True

generate_keys: False

@@ -1,12 +0,0 @@
---
enable_gui: False

enable_ssh: True

enable_ssh_password_auth: False

disable_sudo_password: False

enable_networkd: True

generate_keys: False

@@ -1,12 +0,0 @@
---
enable_gui: False

enable_ssh: True

enable_ssh_password_auth: False

disable_sudo_password: True

enable_networkd: True

generate_keys: False

@@ -1,12 +0,0 @@
---
enable_gui: True

enable_ssh: False

enable_ssh_password_auth: False

disable_sudo_password: False

enable_networkd: False

generate_keys: False
@@ -1,6 +0,0 @@
---
description: "EN1 Reverse Proxy / EN1 VPN Server"

targets:
  - admin
  - vpn

@@ -1,8 +0,0 @@
---
description: "Wandering excursion"

targets:
  - admin
  - workstations

ansible_python_interpreter: /usr/bin/python3

@@ -1,20 +0,0 @@
---
description: "EN1 System Control Node"

targets:
  - admin
  - network

networking:
  eno1:
    dhcp: Yes
  eno2:
    addresses: ["192.168.255.10/24"]

#  demo:
#    addresses: ["192.168.1.10/24", "192.168.100.10/24"]
#    dhcp: true
#    dhcp6: true
#    gateway: 192.168.1.1
#    dns: ["8.8.8.8", "8.8.4.4"]
#    vlans: ["101", "200"]
@@ -1,6 +0,0 @@
---
description: "EN2 Digital Ocean Cloud Server"

targets:
  - admin
  - web
@@ -1,5 +0,0 @@
---
description: "EN1 Secondary Datastore"
targets:
  - admin
  - datastore

@@ -1,6 +0,0 @@
---
description: "And the Last"

targets:
  - admin
  - workstations

@@ -1,9 +0,0 @@
---
description: "EN1 Primary Datastore / EN1 Secondary Hypervisor"

targets:
  - admin
  - datastore

networking:
  ovirtmgt:

@@ -1,10 +0,0 @@
---
description: "EN1 Primary Hypervisor"

targets:
  - admin
  - datastore
  - hypervisor

networking:
  ovirtmgt:

@@ -1,33 +0,0 @@
---
description: EN1 Core Router

ansible_network_os: edgeos

targets:
  - admin
  - network

network:
  ethernet_eth0:
    address: dhcp
    description: UPLINK
    extra:
      - duplex auto
      - speed auto
  ethernet_eth1:
    address: 10.42.100.1/24
    description: PUBLIC
    extra:
      - duplex auto
      - speed auto
  ethernet_eth2:
    address: 10.42.101.1/24
    description: PRIVATE
    extra:
      - duplex auto
      - speed auto
  ethernet_eth2_vif_10:
    address: 10.42.102.1/24
    description: SECURE
    extra:
      - mtu 1500

@@ -1,8 +0,0 @@
---
description: "Smooth as Silk"

targets:
  - admin
  - workstations

ansible_python_interpreter: /usr/bin/python3

@@ -1,8 +0,0 @@
---
description: "Watcher who Watches the Watchmen"

targets:
  - admin
  - workstations

ansible_python_interpreter: /usr/bin/python3

@@ -1,5 +0,0 @@
---
description: "Database Host: MariaDB"

targets:
  - admin

@@ -1,5 +0,0 @@
---
description: "Database Host: MySQL"

targets:
  - admin

@@ -1,5 +0,0 @@
---
description: "Database Host: PrometheusDB"

targets:
  - admin

@@ -1,6 +0,0 @@
---
description: "Development Host: Nginx Web Server"

targets:
  - admin
  - web

@@ -1,9 +0,0 @@
---
description: "Application Host: Bitwarden"

targets:
  - admin
  - bitwarden

networking:
  eth0:

@@ -1,9 +0,0 @@
---
description: "Application Host: Gitea"

targets:
  - admin
  - gitea

networking:
  eth0:

@@ -1,9 +0,0 @@
---
description: "Application Host: Minecraft"

targets:
  - admin
  - minecraft

networking:
  eth0:

@@ -1,9 +0,0 @@
---
description: "Application Host: Nextcloud"

targets:
  - admin
  - nextcloud

networking:
  eth0:

@@ -1,6 +0,0 @@
---
description: "Application Host: Plex Media Server"

targets:
  - admin
  - plex
@@ -1,32 +0,0 @@
---
- import_playbook: dependencies.yml

- name: Setup environment
  hosts: all:!network
  tags:
    - initialize
  vars:
    restart_services: true
  roles:
    - role: packages
      vars:
        update: true
        exclude: []  # Override the default kernel exclusion
        clean: true
    - role: sshd
    - role: networkd
  tasks:
    - name: Set hostname
      become: true
      hostname:
        name: "{{ inventory_hostname }}"
    - name: Install global bashrc
      become: true
      copy:
        src: bashrc.sh
        dest: /etc/profile.d/ZA-enpn-bashrc.sh
        mode: 0644

- import_playbook: deploy-local-auth.yml

- import_playbook: deploy-sshkeys.yml
@@ -1,26 +0,0 @@
---
- hosts: vms
  name: Replace NetworkManager with systemd-networkd
  tasks:
    - name: Install systemd-networkd
      when: enable_networkd == true
      block:
        - import_tasks: tasks/centos/networkd.yml
          when: ansible_distribution == "CentOS"
        - import_tasks: tasks/fedora/networkd.yml
          when: ansible_distribution == "Fedora"
        # - import_tasks: common/debian/networkd.yml
        #   when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"

    - import_tasks: tasks/networkd/config.yml
    - import_tasks: tasks/networkd/services.yml


- hosts: vms
  name: Install ovirt agent
  tasks:
    - name: Install ovirt-agent
      become: true
      yum:
        name: ovirt-guest-agent
        state: latest
@@ -1,8 +0,0 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
[Match]
Name={{ item.key }}

[Network]
DHCP=Yes

# EOF
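This template is rendered once per interface via `with_dict: "{{ networking }}"` in the networkd config tasks later in this diff, so `item.key` is the interface name. A minimal sketch of the host-variable shape it assumes, modeled on the `networking` host_vars shown earlier:

```yaml
# Sketch of the `networking` dict the template iterates over; each key
# becomes a /etc/systemd/network/<name>.network file
networking:
  eno1:
    dhcp: Yes
  eno2:
    addresses: ["192.168.255.10/24"]
```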
@@ -1,26 +0,0 @@
---
- hosts: all
  name: Upgrade packages
  tasks:


    - name: Upgrade YUM packages
      when: ansible_distribution == "CentOS"
      become: true
      yum:
        state: latest
        name: "*"
        exclude: kernel*{{ ',' + exclude_upgrade | default('') }}

    - name: Upgrade DNF packages
      when: ansible_distribution == "Fedora"
      become: true
      dnf:
        state: latest
        name: "*"
        exclude: kernel*{{ ',' + exclude_upgrade | default('') }}

    # - name: Upgrade APT packages
    #   when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
    #   become: true
    #   apt:
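The `exclude` expression above always pins `kernel*` and appends a caller-supplied, comma-separated pattern list. With a hypothetical extra-vars value like the one below, the rendered exclude string would be `kernel*,docker-ce*`; left unset, the expression reduces to the kernel exclusion alone (plus a harmless trailing comma).

```yaml
# Hypothetical extra-vars illustrating the exclude_upgrade hook above
exclude_upgrade: "docker-ce*"
```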
@@ -1,132 +0,0 @@
---
- import_playbook: dependencies.yml

- hosts: all:!network
  name: Update local user accounts and access controls
  tasks:
    - import_tasks: tasks/users-preprocessing.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ targets + ['omni'] }}"

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            groups: "{{ item.targets | intersect(targets) + ['omni'] }}"
            system: "{{ item.svc | default(False) }}"
            state: present
            generate_ssh_key: "{{ True if generate_keys | bool == true else False }}"
            ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
            ssh_key_bits: 4096
            ssh_key_type: ed25519
            password: "{{ item.password }}"
          loop: "{{ local_users }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ local_removed_users | difference(protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ local_admin_users }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: 0644
        state: "{{ 'absent' if disable_sudo_password | bool == false else 'present' }}"
      loop: "{{ local_admin_users }}"

    - name: Configure GNOME
      tags: users_gnome
      when: ansible_distribution == "Fedora" and disable_gnome_user_list | bool == true
      become: true
      block:
        - name: Configure GDM profile
          blockinfile:
            create: true
            path: /etc/dconf/profile/gdm
            block: |
              user-db:user
              system-db:gdm
              file-db:/usr/share/gdm/greeter-dconf-defaults
        - name: Configure GDM keyfile
          blockinfile:
            create: true
            path: /etc/dconf/db/gdm.d/00-login-screen
            block: |
              [org/gnome/login-screen]
              # Do not show the user list
              disable-user-list=true
        - name: Delete existing user database
          file:
            path: /var/lib/gdm/.config/dconf/user
            state: absent
        - name: Restart dconf database
          shell: dconf update

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ local_users }}"

# - hosts: router.net.enp.one
#   name: Configure users on router
#   connection: network_cli
#   vars:
#     ansible_network_os: edgeos
#   tasks:
#     - import_tasks: tasks/users-preprocessing.yml
#
#     - name: Create users
#       edgeos_config:
#         lines:
#           - set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
#           - set system login user {{ item.name }} full-name "{{ item.fullname if item.fullname is defined else "" }}"
#           - set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
#       loop: "{{ local_users | difference([None]) }}"
#
#     - name: Grant administrative access to admin users
#       edgeos_config:
#         lines:
#           - set system login user {{ item.name }} level admin
#       loop: "{{ local_admin_users | difference([None]) }}"
#
#     - name: Assemble key files for loadkey usage
#       edgeos_command:
#         commands: sudo tee /tmp/{{ item.name }}.keys<<<"{{ item.sshkeys | join('\n') }}"
#       loop: "{{ local_admin_users | difference([None]) }}"
#
# - import_playbook: deploy-sshkeys.yml
@@ -1,59 +0,0 @@
---
- hosts: router.net.enp.one
  name: Configure users on router
  connection: network_cli
<<<<<<< Updated upstream
  gather_facts: false
=======
  vars:
    ansible_network_os: edgeos
>>>>>>> Stashed changes
  tasks:
    - import_tasks: tasks/users-preprocessing.yml

    - name: Create users
      edgeos_config:
        lines:
          - set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
          - set system login user {{ item.name }} full-name "{{ item.fullname if item.fullname is defined else "" }}"
          - set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
      loop: "{{ local_users | difference([None]) }}"

    - name: Grant administrative access to admin users
      edgeos_config:
        lines:
          - set system login user {{ item.name }} level admin
      loop: "{{ local_admin_users | difference([None]) }}"

<<<<<<< Updated upstream
    - name: Assemble loadkey files
      edgeos_command:
        commands:
          - sudo tee "{{ item.sshkeys | join('\n') }}"<<</tmp/{{ item.name }}.keys
      loop: "{{ local_admin_users | difference([None]) }}"

    - name: Load keys
      edgeos_config:
        lines:
          - loadkey {{ item }} /tmp/{{ item }}.keys
      loop: "{{ local_admin_users | difference([None]) }}"
=======
    - name: Assemble key files for loadkey usage
      edgeos_command:
        commands: sudo tee /tmp/{{ item.name }}.keys<<<"{{ item.sshkeys | join('\n') }}"
      loop: "{{ local_admin_users | difference([None]) }}"

    # - name: Assemble loadkey files
    #   copy:
    #     src: keys/{{ item }}
    #     dest: /tmp
    #   with_items:
    #     - "{{ local_admin_users | difference([None]) }}"

    # - name: Load keys
    #   edgeos_config:
    #     lines:
    #       - loadkey {{ item }} /tmp/{{ item }}/*.pub
    #   with_items:
    #     - "{{ local_admin_users | difference([None]) }}"
>>>>>>> Stashed changes
@@ -1,5 +0,0 @@
---
- import_playbook: dependencies.yml

- import_playbook: update-system.yml
- import_playbook: update-users-local.yml
@@ -1,12 +0,0 @@
---
- name: Install required packages
  when: ansible_distribution == "Fedora" or (ansible_distribution == "CentOS" and ansible_distribution_major_version == "8")
  become: true
  dnf:
    state: latest
    name:
      - openldap-servers
      - openldap-clients
      - nss-pam-ldapd

- name: Configure
@@ -1,36 +0,0 @@
---
- name: Install Ovirt on CentOS 8
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  block:
    - name: Install Ovirt repository
      dnf:
        state: latest
        name: http://resources.ovirt.org/pub/yum-repo/ovirt-release43.rpm
    - name: Update using the new repository
      dnf:
        state: latest
        name: "*"
        exclude: kernel*
    - name: Install Ovirt
      dnf:
        state: latest
        name: ovirt-engine

- name: Install Ovirt on CentOS 7
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
  block:
    - name: Install Ovirt repository
      yum:
        state: latest
        name: http://resources.ovirt.org/pub/yum-repo/ovirt-release43.rpm
    - name: Update using the new repository
      yum:
        state: latest
        name: "*"
        exclude: kernel*
    - name: Install Ovirt
      yum:
        state: latest
        name: ovirt-engine
@@ -1,20 +0,0 @@
---
- name: Install CentOS 8 python bindings
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  become: true
  dnf:
    state: latest
    name:
      - python3-libselinux
      - python3-policycoreutils
      - python3-firewall

- name: Install CentOS 7 python bindings
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
  become: true
  yum:
    state: latest
    name:
      - libselinux-python
      - policycoreutils-python
      - python-firewall
@@ -1,8 +0,0 @@
---
- name: Install systemd-networkd
  become: true
  yum:
    state: latest
    name:
      - systemd-resolved
      - systemd-networkd

@@ -1,9 +0,0 @@
---
- name: Install global packages using YUM
  become: true
  yum:
    state: latest
    name: "{{ item }}"
  with_items:
    - "{{ packages_global }}"
    - "{{ packages_yum }}"
@@ -1,31 +0,0 @@
---
- name: Enable Extra Packages for Enterprise Linux
  become: true
  dnf_repository:
    name: epel
    description: Extra Packages for Enterprise Linux
    baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/

- name: Install Extra Packages for Enterprise Linux GPG key
  become: true
  rpm_key:
    state: present
    key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7

- name: Disable yum subscription-manager
  become: true
  lineinfile:
    regex: enabled=1
    line: enabled=0
    path: /etc/yum/pluginconf.d/subscription-manager.conf
    create: yes
    state: present

- name: Disable yum repo report upload
  become: true
  lineinfile:
    regex: enabled=1
    line: enabled=0
    path: /etc/yum/pluginconf.d/enabled_repos_upload.conf
    create: yes
    state: present
@@ -1,10 +0,0 @@
---
- name: Install Fedora python bindings
  when: ansible_distribution == "Fedora"
  become: true
  dnf:
    state: latest
    name:
      - libselinux-python
      - policycoreutils-python
      - python3-firewall

@@ -1,8 +0,0 @@
---
- name: Install systemd-networkd
  become: true
  dnf:
    state: latest
    name:
      - systemd-resolved
      - systemd-networkd

@@ -1,9 +0,0 @@
---
- name: Install global packages using DNF
  become: true
  dnf:
    state: latest
    name: "{{ item }}"
  with_items:
    - "{{ packages_global }}"
    - "{{ packages_dnf }}"
@@ -1,55 +0,0 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: absent

- name: Create the networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: directory

- name: Make .network files
  become: true
  template:
    src: network.j2
    dest: "/etc/systemd/network/{{ item.key }}.network"
  with_dict: "{{ networking }}"

- name: Configure systemd services
  become: true
  block:
    - name: Disable network scripts and NetworkManager
      service:
        name: "{{ item }}"
        enabled: false
      with_items:
        - network
        - NetworkManager
        - NetworkManager-wait-online
    - name: Enable systemd-networkd and systemd-resolved
      service:
        name: "{{ item }}"
        enabled: true
        state: started
      with_items:
        - systemd-networkd
        - systemd-resolved
        - systemd-networkd-wait-online
    - name: Symlink so systemd-resolved uses /etc/resolv.conf
      file:
        dest: /etc/resolv.conf
        src: /run/systemd/resolve/resolv.conf
        state: link
        force: true
        setype: net_conf_t
    - name: Symlink so /etc/resolv.conf uses systemd
      file:
        dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
        src: /usr/lib/systemd/system/systemd-resolved.service
        state: link
        force: true
@@ -1,22 +0,0 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: absent

- name: Create the networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: directory

- name: Make .network files
  when: networking is defined
  become: true
  template:
    src: network.j2
    dest: "/etc/systemd/network/{{ item.key }}.network"
  with_dict: "{{ networking }}"
@@ -1,39 +0,0 @@
---
- name: Load users variables
  include_vars:
    file: users.yml

- name: Reconcile user targets with host targets to get host users
  set_fact:
    users_local: >-
      {{
        users_local | default([]) + ([item] if item.targets | intersect(local_targets) else [])
      }}
  loop: "{{ users }}"

- name: Determine local user names
  set_fact:
    users_local_names: "{{ users_local_names | default([]) + [item.name] }}"
  loop: "{{ users_local }}"

- name: Determine administrative users
  set_fact:
    users_local_admin: >-
      {{
        users_local_admin | default([]) + ([item] if item.admin | default(False) else [])
      }}
  loop: "{{ users_local }}"

- name: Determine existing users
  shell: 'grep omni /etc/group | cut -d: -f4 | tr "," "\n"'
  changed_when: false
  register: users_local_existing

- name: Determine removed users
  set_fact:
    users_local_removed: >-
      {{
        users_local_removed | default([]) +
        ([item] if item not in users_local_names else [])
      }}
  loop: "{{ users_local_existing.stdout_lines }}"
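These facts imply a `users.yml` entry shaped roughly like the following. The field names are taken from what the tasks above and the user-creation plays reference (`name`, `targets`, `admin`, `fullname`, `svc`, `password`, `sshkeys`); the values are illustrative only.

```yaml
# Hypothetical users.yml entry; field names inferred from the tasks above,
# values are placeholders
users:
  - name: jdoe
    fullname: Jane Doe
    admin: true                          # collected into users_local_admin
    svc: false                           # marks system/service accounts
    targets: ["admin", "workstations"]   # intersected with local_targets
    password: "$6$examplehash"           # pre-hashed password string
    sshkeys:
      - ssh-ed25519 AAAA...example jdoe@example
```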
@@ -1,14 +0,0 @@
---
- name: Install SSH Banner
  become: true
  template:
    src: motd.j2
    dest: /etc/issue.net
    mode: 0644

- name: Configure SSH banner
  become: true
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '#Banner none'
    line: 'Banner /etc/issue.net'

@@ -1,21 +0,0 @@
- name: Turn off password authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: "PasswordAuthentication yes"
    replace: "PasswordAuthentication no"

- name: Turn off challenge response authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: "ChallengeResponseAuthentication yes"
    replace: "ChallengeResponseAuthentication no"

- name: Turn off GSSAPI authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: "GSSAPIAuthentication yes"
    replace: "GSSAPIAuthentication no"

@@ -1 +0,0 @@
tasks
@@ -1,2 +0,0 @@
---
ansible_user: ansible

@@ -1,27 +0,0 @@
---
gateway: router.net.enp.one

dhcp:
  - name: PUBLIC
    subnet: 10.42.100.1/24
    dns: 10.42.100.1
    domain: tre2.local
    lease: 21600
    start: 10.42.100.26
    stop: 10.42.100.254

  - name: DOMAIN
    subnet: 10.42.101.0/24
    dns: 10.42.101.1
    domain: net.enp.one
    lease: 21600
    start: 10.42.101.100
    stop: 10.42.101.254

  - name: SECURE
    subnet: 10.42.102.0/24
    dns: 10.42.102.1
    domain: net.enp.one
    lease: 3600
    start: 10.42.102.50
    stop: 10.42.102.254
@@ -1,28 +0,0 @@
---
packages_global:
  - cmake
  - curl
  - gcc
  - gcc-c++
  - git
  - libselinux-python
  - make
  - nano
  - openssl-devel
  - policycoreutils-python
  - python-devel
  - python-virtualenv
  - systemd-devel
  - unzip
  - vim
  - vim-minimal

packages_dnf:
  - python3-devel

packages_yum:
  - bash-completion
  - bash-completion-extras
  - nc
  - nfs-utils
  - wget
ansible.cfg (new file, 8 lines)
@@ -0,0 +1,8 @@
[defaults]
host_key_checking = false

[ssh_connection]
ssh_args = "-C -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes"

[inventory]
enable_plugins = yaml
en1.yml (143 lines)
@@ -4,125 +4,84 @@
    ansible_user: ansible
    ansible_python_interpreter: /opt/ansible/bin/python
    omni_ansible_venv: /opt/ansible
    omni_protected_users: ["root", "ansible"]
    omni_domain: net.enp.one
    update: false
    clean: false

  children:
    network:
      hosts:
        router:
          ansible_host: en1-core.net.enp.one
          ansible_network_os: edgeos
          ansible_connection: network_cli
          description: EN1 Core Gateway
          targets: ["admin", "network"]

    servers:
      vars:
        omni_os:
          name: centos
          version_major: "8"
        omni_local_hosts:
          - hostname: jupiter.svr.local
            ip: 192.168.42.10
          - hostname: remus.svr.local
            ip: 192.168.42.20
          - hostname: romulus.svr.local
            ip: 192.168.42.30
      hosts:
        jupiter:
          ansible_host: jupiter.net.enp.one
          omni_description: EN1 System Control Server
          omni_local_targets: ["core", "network"]
          omni_docker_swarm_iface: eno2
          omni_networking:
            eno1:
              dhcp: true
              dhcp_address: 10.42.101.10/24
            eno2:
              dhcp: false
              addresses: ["192.168.42.10/24"]
        remus:
          ansible_host: remus.net.enp.one
          omni_description: EN1 Hypervisor/Datastore
          omni_local_targets: ["core", "vms"]
          omni_docker_swarm_iface: eno2
          omni_networking:
            eno1:
              dhcp: true
              dhcp_address: 10.42.101.20/24
            eno2:
              dhcp: false
              addresses: ["192.168.42.20/24"]
        romulus:
          ansible_host: romulus.net.enp.one
          omni_description: EN1 Hypervisor/Datastore
          omni_local_targets: ["core", "vms"]
          omni_docker_swarm_iface: eno2
          omni_networking:
            eno1:
              dhcp: true
              dhcp_address: 10.42.101.30/24
            eno2:
              dhcp: false
              addresses: ["192.168.42.20/24"]
        # novis:
        #   ansible_host: novis.tre2.local
        #   description: EN1 Backup Storage
        #   local_targets: ["core", "datastore"]
# children:
#   vms:
#     vars:
#       disable_sudo_password: true
#       required_os: centos_8
#     hosts:
#       gitea:
#         ansible_host: vm-host-gitea.net.enp.one
#         description: "Application Host: Gitea VCS"
#         local_targets: ["admin", "vcs"]
#         networking:
#           eth0:
#             dhcp: true
#       plex:
#         ansible_host: vm-host-plex.net.enp.one
#         description: "Application Host: Plex Media Server"
#         local_targets: ["admin", "plx"]
#         networking:
#           eth0:
#             dhcp: true
#       bitwarden:
#         ansible_host: vm-host-bitwarden.net.enp.one
#         description: "Application Host: Bitwarden Password Manager"
#         local_targets: ["admin", "ssv"]
#         networking:
#           eth0:
#             dhcp: true
#       nextcloud:
#         ansible_host: vm-host-nextcloud.net.enp.one
#         description: "Application Host: Nextcloud Web Storage"
#         local_targets: ["admin", "cfs"]
#         networking:
#           eth0:
#             dhcp: true
#   workstations:
#     vars:
#       enable_gui: true
#       enable_ssh: false
#       enable_networkd: false
#     hosts:
#       omega:
#         ansible_host: localhost
#         description: Last
#         required_os: centos_7
#         local_targets: ["admin", "recovery"]
#       vigil-nox:
#         ansible_host: localhost
#         required_os: fedora_31
#         description: Watchman
#         local_targets: ["admin", "desktop"]
#       serico-nox:
#         ansible_host: localhost
#         description: Silk
#         required_os: fedora_31
#         local_targets: ["admin", "desktop"]
#       inerro:
#         ansible_host: localhost
#         description: Wanderer
#         required_os: fedora_31
#         local_targets: ["admin", "desktop"]
#   network:
#     hosts:
#       router:
#         ansible_host: router.net.enp.one
#         ansible_network_os: edgeos
#         ansible_connection: network_cli
#         description: EN1 Core Gateway
#         targets: ["admin", "network"]
              addresses: ["192.168.42.30/24"]
      children:
        virtualization: {}
        datastore: {}

    virtualization:
      hosts:
        jupiter:
          omni_docker_configs: /etc/omni/compose
          omni_docker_swarm_iface: eno2
      children:
        virtualization_worker:
          hosts:
            remus:
              omni_docker_swarm_iface: eno2
            romulus:
              omni_docker_swarm_iface: eno2

    datastore:
      children:
        datastore_arbiter:
          hosts:
            jupiter:
              omni_datastore_mount: /mnt/datastore
              omni_gluster_brick:
                mount: /mnt/brick0
                fs: xfs
        datastore_block:
          hosts:
            remus:
              omni_gluster_brick:
                mount: /mnt/brick0
                fs: xfs
            romulus:
              omni_gluster_brick:
                mount: /mnt/brick0
                fs: xfs
@@ -1,78 +0,0 @@
---
- name: Configure system authentication
  hosts: all
  roles:
    - role: sshd
  tasks:
    - import_tasks: tasks/preprocess-users.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ omni_local_targets + ['omni'] }}"

        - name: Load user passwords
          include_vars:
            file: secrets/passwords.yml

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            groups: "{{ item.targets | intersect(omni_local_targets) + ['omni'] }}"
            system: "{{ item.svc | default(false) }}"
            state: present
            generate_ssh_key: false
            password: "{{ omni_users_secrets[item.name] }}"
          loop: "{{ _users_local }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ _users_local_removed | default([]) | difference(omni_protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ _users_local_admin }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: 0644
        state: "{{ 'present' if omni_disable_sudo_password | default(false) | bool == true else 'absent' }}"
      loop: "{{ _users_local_admin }}"

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ _users_local }}"

    - import_tasks: tasks/deploy-ssh-keys.yml
@@ -1,69 +0,0 @@
---
- name: Configure docker hosts
  hosts: servers
  roles:
    - role: docker
  tasks:
    - name: Allow swarm traffic through the firewall
      become: true
      firewalld:
        state: enabled
        service: docker-swarm
        zone: public
        permanent: true
        immediate: true
    - name: Disable firewall on docker bridge interface
      become: true
      firewalld:
        state: enabled
        interface: docker0
        zone: trusted
        permanent: true
        immediate: true

- name: Configure swarm master
  hosts: jupiter
  tasks:
    - name: Configure portainer volume
      docker_volume:
        volume_name: portainer

    - name: Run portainer
      docker_container:
        name: omni.portainer
        image: portainer/portainer
        restart_policy: unless-stopped
        published_ports:
          - 0.0.0.0:8000:8000
          - 0.0.0.0:9000:9000
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
          - portainer:/data

    - name: Initialize swarm
      docker_swarm:
        state: present
        advertise_addr: "{{ omni_docker_swarm_iface }}"

    - name: Set swarm master to DRAIN
      docker_node:
        hostname: "{{ ansible_host }}"
        availability: drain

- name: Configure swarm nodes
  hosts:
    - remus
    - romulus
  tags: docker-nodes
  tasks:
    - name: Fetch docker swarm information
      delegate_to: jupiter
      docker_swarm_info:
      register: _swarm_info

    - name: Join workers to swarm
      docker_swarm:
        state: join
        remote_addrs: ["jupiter.svr.local"]
        join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
        advertise_addr: "{{ omni_docker_swarm_iface }}"
@@ -7,21 +7,83 @@
      hostname:
        name: "{{ ansible_host }}"

    - import_tasks: tasks/preprocess-users.yml
    - import_tasks: tasks/sshd/banner.yml

    - name: Install network bash profile
    - name: Install global bash components
      become: true
      copy:
        src: bashrc.sh
        dest: /home/{{ item.name }}/.bashrc
        src: bash/{{ item }}.sh
        dest: /etc/profile.d/Z-{{ 10 + loop_index }}-enpn-{{ item }}.sh
        mode: 0644
      loop: "{{ _users_local }}"
      loop:
        - global
        - pyenv
        - aliases
        - helpers
      loop_control:
        index_var: loop_index
        label: "{{ item }}"

    - name: Disable dynamic MOTD
      become: true
      replace:
        path: /etc/pam.d/sshd
        regexp: "^session\\s+optional\\s+pam_motd\\.so.*$"
        replace: "#session optional pam_motd.so"

    - name: Remove legacy global bashrc
      become: true
      file:
        path: /etc/profile.d/ZA-enpn-bashrc.sh
        state: absent

    - name: Disable case-sensitive autocomplete
      become: true
      lineinfile:
        path: /home/{{ item.name }}/.inputrc
        line: set completion-ignore-case On
        path: /etc/inputrc
        line: set completion-ignore-case ((o|O)(n|ff))
        create: true
        mode: 0644
      loop: "{{ _users_local }}"

- name: Configure additional security settings on shared servers
  hosts: servers
  tasks:
    - name: Identify local home directories
      become: true
      find:
        file_type: directory
        path: /home/
        recurse: false
      register: _local_home_dirs

    - name: Determine files to write-protect
      set_fact:
        _secure_files: >-
          {{ _secure_files | default([]) + [
            item.path ~ '/.bashrc',
            item.path ~ '/.bash_profile',
            item.path ~ '/.ssh/authorized_keys',
            item.path ~ '/.ssh/config'
          ] }}
      loop: "{{ _local_home_dirs.files }}"
      loop_control:
        label: "{{ item.path }}"

    - name: Fetch status of secure files
      become: true
      stat:
        path: "{{ item }}"
      loop: "{{ _secure_files }}"
      loop_control:
        label: "{{ item }}"
      register: _secure_file_stats

    - name: Restrict access to secure files
      become: true
      file:
        path: "{{ item.item }}"
        state: "{{ 'file' if item.stat.exists else 'touch' }}"
        mode: 0400
      loop: "{{ _secure_file_stats.results }}"
      loop_control:
        label: "Write-protecting: {{ item.item }}"
playbooks/configure-mgmt.yml (new file, 164 lines)
@@ -0,0 +1,164 @@
---
- name: Configure server management services
  hosts: servers
  tasks:
    - import_tasks: tasks/sshd/secure.yml

    - name: Enable cockpit
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      become: true
      systemd:
        name: cockpit.socket
        enabled: true
        state: started

- name: Configure virtualization management services
  hosts: virtualization
  tasks:
    - name: Create docker group
      become: true
      group:
        name: docker
        state: present

- name: Configure local accounts
  hosts: all
  vars_files:
    - vars/accounts.yaml
    - vars/secrets/passwords.yaml
    - vars/sshkeys.yaml
  tasks:
    - name: Create omni group
      become: true
      group:
        name: "{{ omni_group.name }}"
        gid: "{{ omni_group.gid }}"
        state: present

    - name: Determine existing omni users
      changed_when: false
      shell:
        cmd: 'grep omni /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
      register: _existing_omni_users

    - name: Delete removed user accounts
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"

    - name: Delete removed user groups
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      group:
        name: "{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"

    - name: Delete removed user home directories
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      file:
        path: "/home/{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"

    - name: Create account groups
      become: true
      group:
        name: "{{ item.name }}"
        gid: "{{ item.uid }}"
        state: present
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Create accounts
      become: true
      user:
        name: "{{ item.name }}"
        state: present
        uid: "{{ item.uid }}"
        group: "{{ item.name }}"
        groups: >-
          {{
            [omni_group.name] +
            (['wheel' if ansible_os_family | lower == 'redhat' else 'sudo'] if item.admin | default(false) else []) +
            (['docker' if 'virtualization' in group_names else omni_group.name] if item.admin | default(false) else [])
          }}
        # The 'else omni_group.name' above is just some non-breaking value to cover the
        # false condition, it doesn't have special meaning
        comment: "{{ item.fullname | default('') }}"
        shell: "{{ '/bin/bash' if 'mgmt' in item.targets else '/bin/false' }}"
        system: "{{ item.svc | default(false) }}"
        generate_ssh_key: false
        password: "{{ omni_users_secrets[item.name] | default(none) }}"
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        path: /home/{{ item.name }}
        state: directory
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        mode: 0700
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Enforce root password
      become: true
      user:
        name: root
        password: "{{ omni_users_secrets.root }}"
        state: present

    - name: Create SSH directory
      become: true
      file:
        path: /home/{{ item.name }}/.ssh
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
        state: directory
        mode: 0755
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Update authorized keys
      become: true
      when: "'mgmt' in item.targets"
      authorized_key:
        user: "{{ item.name }}"
        key: "{{ omni_ssh_keys[item.name] | join('\n') }}"
        state: present
        exclusive: true
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Enforce ownership of authorized keys
      become: true
      when: "'mgmt' in item.targets"
      file:
        path: /home/{{ item.name }}/.ssh/authorized_keys
        state: file
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
        mode: 0400
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"
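The account plays above imply a `vars/accounts.yaml` shaped roughly like this. Field names come from what the tasks reference (`omni_group.name`/`gid`, per-user `name`, `uid`, `admin`, `targets`, `fullname`, `svc`); every value below is hypothetical.

```yaml
# Hypothetical vars/accounts.yaml sketch inferred from the tasks above
omni_group:
  name: omni
  gid: 1500

omni_users:
  - name: jdoe
    uid: 1501
    fullname: Jane Doe
    admin: true         # adds wheel/sudo, and docker on virtualization hosts
    svc: false
    targets: ["mgmt"]   # 'mgmt' grants a login shell and authorized_keys
```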
@@ -1,35 +1,34 @@
---
- name: Configure router
  hosts: router
  gather_facts: false
  pre_tasks:
    - name: Collect EdgeOS facts
      edgeos_facts:
        gather_subset: "!config"
  tasks:
    - name: Configure interfaces
      edgeos_config:
        lines:
          - set interfaces ethernet eth0 address dhcp
          - set interfaces ethernet eth0 description EXTERNAL
          - set interfaces ethernet eth1 address 10.42.100.1/24
          - set interfaces ethernet eth1 address 10.42.99.1/24
          - set interfaces ethernet eth1 description LOCAL
          - set interfaces ethernet eth2 address 10.42.101.1/24
          - set interfaces ethernet eth2 description DOMAIN
# - name: Configure router
#   hosts: router
#   gather_facts: false
#   pre_tasks:
#     - name: Collect EdgeOS facts
#       edgeos_facts:
#         gather_subset: "!config"
#   tasks:
#     - name: Configure interfaces
#       edgeos_config:
#         lines:
#           - set interfaces ethernet eth0 address dhcp
#           - set interfaces ethernet eth0 description EXTERNAL
#           - set interfaces ethernet eth1 address 10.42.100.1/24
#           - set interfaces ethernet eth1 address 10.42.99.1/24
#           - set interfaces ethernet eth1 description LOCAL
#           - set interfaces ethernet eth2 address 10.42.101.1/24
#           - set interfaces ethernet eth2 description DOMAIN

- name: Configure servers
- name: Configure server networking
  hosts: servers
  roles:
    - role: networkd
  tasks:
    - import_tasks: tasks/networkd/install.yml
    - import_tasks: tasks/networkd/configure.yml
    - import_tasks: tasks/networkd/services.yml

    - name: Configure local hostsfile
      become: true
      lineinfile:
        path: /etc/hosts
        state: present
        line: "{{ item }}"
      loop:
        - "192.168.42.10 jupiter.svr.local"
        - "192.168.42.20 remus.svr.local"
        - "192.168.42.30 romulus.svr.local"
        line: "{{ item.ip }} {{ item.hostname }}"
      loop: "{{ omni_local_hosts | default([]) }}"
playbooks/configure-webproxy.yml (new file, 54 lines)
@@ -0,0 +1,54 @@
---
- import_playbook: initialize.yml


- name: Install Nginx
  hosts: jupiter
  handlers:
    - name: restart-nginx
      import_tasks: tasks/nginx/services.yml
  tasks:
    - import_tasks: tasks/nginx/install.yml

    - name: Set required SELinux options
      become: true
      seboolean:
        name: httpd_can_network_connect
        persistent: true
        state: true
      notify:
        - restart-nginx


- name: Configure Nginx
  hosts: jupiter
  vars_files:
    - vars/applications.yaml
  vars:
    _letsencrypt_cert_dir: /etc/letsencrypt/live
  handlers:
    - name: restart-nginx
      import_tasks: tasks/nginx/services.yml
  tasks:
    - name: Install server configuration
      become: true
      copy:
        src: nginx/nginx.conf
        dest: /etc/nginx/nginx.conf
      notify:
        - restart-nginx

    - name: Install application configurations
      when: item.value.published.host is defined
      become: true
      template:
        src: nginx/{{ item.key }}.nginx.conf.j2
        dest: /etc/nginx/conf.d/{{ item.key }}.conf
        owner: nginx
        group: "{{ ansible_user }}"
        mode: 0755
      loop: "{{ omni_compose_apps | dict2items }}"
      loop_control:
        label: "{{ item.key }} ({{ item.value.published.host | default('none') }})"
      notify:
        - restart-nginx
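Both this playbook and `deploy-compose.yml` below key off the same `omni_compose_apps` mapping. A sketch of the per-application entry they imply, using only fields the tasks reference (`published.host`, `datastore`, `account.name`, `assets`, `force_clean`); the values are hypothetical.

```yaml
# Hypothetical vars/applications.yaml entry inferred from the loops above
omni_compose_apps:
  gitea:
    published:
      host: git.example.net      # drives the per-app nginx config template
    datastore: /gitea            # appended to omni_datastore_mount
    account:
      name: gitea                # owns the datastore directory and assets
    force_clean: false           # optional: always remove the stack before deploy
    assets:
      - name: app.ini            # rendered into the datastore directory
        src: gitea/app.ini.j2
        permissions: 0640
```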
@@ -1,32 +0,0 @@
---
- import_playbook: meta.yml

- name: Configure system settings
  hosts: all
  pre_tasks:
    - import_tasks: tasks/centos-8-kernelplus.yml
      tags: kernel
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  roles:
    - role: packages
      vars:
        omni_pkg_clean: true
    - role: sshd
      vars:
        omni_restart_services: true
  tasks:
    - name: Enable cockpit
      become: true
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      systemd:
        name: cockpit
        enabled: true
        state: started

- import_playbook: configure-auth.yml

- import_playbook: configure-env.yml

- import_playbook: configure-network.yml

- import_playbook: configure-docker.yml
playbooks/deploy-compose.yml (new file, 98 lines)
@@ -0,0 +1,98 @@
---
- name: Prompt for input
  hosts: all
  tags:
    - always
  gather_facts: false
  vars_prompt:
    - name: application
      prompt: Enter name of application stack to deploy
      private: false
  vars_files:
    - vars/applications.yaml
  tasks:
    - name: Validate user input
      assert:
        that: application in omni_compose_apps.keys()

    - name: Set facts for usage later
      set_fact:
        _runtime_application: "{{ application }}"


- import_playbook: initialize.yml


- name: Build image
  hosts: virtualization
  vars_files:
    - vars/applications.yaml
  tasks:
    - import_tasks: tasks/docker/build.yml


- name: Configure datastore
  hosts: jupiter
  vars_files:
    - vars/applications.yaml
    - vars/secrets/applications.yaml
  tasks:
    - name: Create application datastore directory
      become: true
      file:
        path: "{{ omni_datastore_mount }}{{ omni_compose_apps[_runtime_application].datastore }}"
        state: directory
        owner: "{{ omni_compose_apps[_runtime_application].account.name }}"
        group: "{{ omni_compose_apps[_runtime_application].account.name }}"
        mode: 0750

    - name: Create datastore assets
      become: true
      template:
        src: "{{ item.src }}"
        dest: "{{ omni_datastore_mount }}{{ omni_compose_apps[_runtime_application].datastore }}/{{ item.name }}"
        owner: "{{ omni_compose_apps[_runtime_application].account.name }}"
        group: "{{ omni_compose_apps[_runtime_application].account.name }}"
        mode: "{{ item.permissions | default(0644) }}"
      loop: "{{ omni_compose_apps[_runtime_application].assets | default([]) }}"


- name: Configure docker stack
  hosts: jupiter
  vars_files:
    - vars/applications.yaml
    - vars/secrets/applications.yaml
  tasks:
    - name: Create compose configuration directory
      become: true
      file:
        path: "{{ omni_docker_configs }}/{{ _runtime_application }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: docker
        mode: 0750

    - name: Install docker-compose file
      become: true
      template:
        src: docker-compose/{{ _runtime_application }}.yaml.j2
        dest: "{{ omni_docker_configs }}/{{ _runtime_application }}/docker-compose.yaml"
        owner: "{{ ansible_user }}"
        group: docker
        mode: 0640
      register: _stack_file_state

    - name: Remove the existing stack
      when: _stack_file_state.changed is true or omni_compose_apps[_runtime_application].force_clean | default(false) is true
      docker_stack:
        name: "{{ _runtime_application }}"
        state: absent
        compose:
          - "{{ omni_docker_configs }}/{{ _runtime_application }}/docker-compose.yaml"

    - name: Deploy the stack
      docker_stack:
        name: "{{ _runtime_application }}"
        state: present
        compose:
          - "{{ omni_docker_configs }}/{{ _runtime_application }}/docker-compose.yaml"
@@ -1,67 +0,0 @@
---
- hosts: nimbus-1.net.enp.one
  name: Deploy documentation
  vars:
    # Local directory to use for cloning and building the documentation site
    DIR_BUILD: /tmp/docs
    # Remote directory to install the site at
    DIR_DEPLOY: /usr/share/nginx/doc.enp.one/html
  tasks:
    - name: Build the static site locally
      delegate_to: 127.0.0.1
      block:
        - name: Ensure the build directory does not exist
          file:
            path: "{{ DIR_BUILD }}"
            state: absent
        - name: Clone documentation repository
          git:
            repo: git@vcs.enp.one:omni/omni-docs.git
            dest: "{{ DIR_BUILD }}/"
        - name: Generate build env requirements file
          # Generate the requirements.txt style format, pipe through grep to remove
          # the index line (not sure why that's included at all tbh) and save the
          # result in "requirements.txt" for usage with pip
          shell: pipenv lock --requirements | grep --invert-match "\-i">requirements.txt
          args:
            chdir: "{{ DIR_BUILD }}/"
        - name: Create build env and install requirements
          pip:
            requirements: "{{ DIR_BUILD }}/requirements.txt"
            virtualenv: "{{ DIR_BUILD }}/venv"
            virtualenv_python: python3
            state: present
        - name: Build the static site using mkdocs
          shell: "{{ DIR_BUILD }}/venv/bin/mkdocs build"
          args:
            chdir: "{{ DIR_BUILD }}"

    - name: Upload static site to remote
      copy:
        src: "{{ DIR_BUILD }}/site/"
        dest: "/tmp/docs/"
    - name: Remove legacy site
      become: true
      file:
        path: "{{ DIR_DEPLOY }}"
        state: absent
    - name: Copy static site to deployment directory
      become: true
      copy:
        src: "/tmp/docs/"
        dest: "{{ DIR_DEPLOY }}"
        remote_src: true
        owner: root
        group: nginx
        mode: 0755
        setype: httpd_sys_content_t

    - name: Clean up local build directory
      delegate_to: 127.0.0.1
      file:
        path: "{{ DIR_BUILD }}"
        state: absent
    - name: Clean up remote temp directory
      file:
        path: /tmp/docs
        state: absent
@@ -1,38 +0,0 @@
---
- hosts: nimbus-1.net.enp.one
  name: Deploy main landing page at enpaul.net
  vars:
    # Local directory to use for cloning and building the documentation site
    DIR_BUILD: /tmp/docs
    # Remote directory to install the site at
    DIR_DEPLOY: /usr/share/nginx/enpaul.net/html
  tasks:
    - name: Upload static site to remote
      copy:
        src: "{{ DIR_BUILD }}/site/"
        dest: "/tmp/docs/"
    - name: Remove legacy site
      become: true
      file:
        path: "{{ DIR_DEPLOY }}"
        state: absent
    - name: Copy static site to deployment directory
      become: true
      copy:
        src: "/tmp/docs/"
        dest: "{{ DIR_DEPLOY }}"
        remote_src: true
        owner: root
        group: nginx
        mode: 0755
        setype: httpd_sys_content_t

    - name: Clean up local build directory
      delegate_to: 127.0.0.1
      file:
        path: "{{ DIR_BUILD }}"
        state: absent
    - name: Clean up remote temp directory
      file:
        path: /tmp/docs
        state: absent
@@ -1,78 +0,0 @@
---
- name: Configure local users
  hosts: all:!network
  tags:
    - auth
    - ssh
    - users
  tasks:
    - import_tasks: tasks/preprocess-local-users.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ local_targets + ['omni'] }}"

        - name: Load user passwords
          include_vars:
            file: secrets/passwords.yml

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            groups: "{{ item.targets | intersect(local_targets) + ['omni'] }}"
            system: "{{ item.svc | default(False) }}"
            state: present
            generate_ssh_key: false
            password: "{{ users_secrets[item.name] }}"
          loop: "{{ users_local }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ users_local_removed | default([]) | difference(protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ users_local_admin }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: 0644
        state: "{{ 'present' if disable_sudo_password | bool == true else 'absent' }}"
      loop: "{{ users_local_admin }}"

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ users_local }}"
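For reference, the loops above assume a `users_local` list (assembled by tasks/preprocess-local-users.yml, not shown in this diff) plus a `users_secrets` mapping from secrets/passwords.yml. A sketch of the shape implied by the fields these tasks read; all names and values below are illustrative:

users_local:
  - name: jdoe                  # illustrative account name
    fullname: Jane Doe
    targets: [admin, media]     # group memberships, intersected with local_targets
    svc: false                  # true would create a system account
    sshkeys:
      - ssh-ed25519 AAAA... jdoe@laptop
users_secrets:                  # maps user name to a crypt(3)-style password hash
  jdoe: $6$...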
@@ -1,33 +0,0 @@
---
- name: Deploy minecraft container
  hosts: remus
  tasks:
    - name: Create world volume
      docker_volume:
        name: minecraft
        driver: local
        state: present
        recreate: never

    - name: Launch minecraft server container
      docker_container:
        name: mcs
        state: started
        image: itzg/minecraft-server
        recreate: "{{ omni_update_minecraft | default(false) | bool }}"
        volumes:
          - minecraft:/data
        published_ports:
          - "25565:25565/tcp"
        env:
          EULA: "TRUE"
          VERSION: 1.15.2
          MAX_MEMORY: "8G"
          MOTD: "A home for buttery companions"
          MODE: survival
          OPS: ScifiGeek42
          WHITELIST: "ScifiGeek42,fantasycat256,CoffeePug,Snowdude21325,KaiserSJR,glutenfreebean"
          MAX_BUILD_HEIGHT: "512"
          SNOOPER_ENABLED: "false"
          ICON: https://cdn.enp.one/img/logos/e-w-sm.png
          ENABLE_RCON: "false"
@@ -1,44 +0,0 @@
---
- name: Deploy plex container
  hosts: remus
  tasks:
    - name: Create plex metadata volume
      docker_volume:
        name: plexmeta
        driver: local
        state: present
        recreate: never

    - name: Create plex NFS media volume
      docker_volume:
        name: plexdata
        driver: local
        state: present
        recreate: never
        driver_options:
          type: nfs
          o: "addr=plexistore.tre2.local,ro"
          device: ":/nfs/plex"

    - name: Allow plex access through the firewall
      become: true
      firewalld:
        state: enabled
        service: plex
        permanent: true
        immediate: true

    - name: Launch plex container
      docker_container:
        name: pms
        state: started
        image: plexinc/pms-docker:latest
        pull: true
        recreate: "{{ omni_update_plex | default(false) | bool }}"
        network_mode: host
        volumes:
          - plexmeta:/config
          - plexdata:/data:ro
        env:
          TZ: America/New_York
          ALLOWED_NETWORKS: 10.42.100.0/24,10.42.101.0/24
@@ -1,22 +0,0 @@
---
- name: Update ssh keys on all devices
  hosts: all
  tasks:
    - import_tasks: tasks/preprocess-local-users.yml

    - name: Ensure SSH directory exists
      become: true
      file:
        state: directory
        path: /home/{{ item.name }}/.ssh
      loop: "{{ users_local }}"

    - name: Put keys on remote
      become: true
      when: item.sshkeys != []
      authorized_key:
        user: "{{ item.name }}"
        key: "{{ item.sshkeys | join('\n') }}"
        state: present
        exclusive: yes
      loop: "{{ users_local }}"
1
playbooks/files
Symbolic link
@@ -0,0 +1 @@
../resources
@@ -1,63 +0,0 @@
# Global network bashrc/profile file
# Updated 2020-03-18

function parse_git_branch() {
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]$(parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "

function venv() {
  DIR="/home/$USERNAME/.venvs"

  if [ $# -eq 0 ]; then
    echo "No command specified"

  elif [ $1 = "--help" ] || [ $1 = '-h' ]; then
    echo "Custom python Virtualenv manager
\"Because pipenv is too hard and everything else sucks\"

Commands:
  list                 List available virtualenvs
  show                 Alias of list
  delete <venv>        Delete a virtualenv
  del                  Alias of delete
  rm                   Alias of delete
  load <venv>          Activate a virtualenv for usage
  new <venv> <python>  Create a new virtualenv. If <python> is not specified,
                       then the system default python is used
"
  elif [ $1 = "list" ] || [ $1 = "show" ] || [ $1 = "ls" ]; then
    ls $DIR
  elif [ $1 = "load" ]; then
    . $DIR/$2/bin/activate
  elif [ $1 = "new" ]; then
    virtualenv $DIR/$2 --python=$3
  elif [ $1 = "delete" ] || [ $1 = "del" ] || [ $1 = "rm" ]; then
    rm -rf $DIR/$2
  elif [ $1 = "go" ]; then
    cd $DIR/$2
  fi
}

function up() { cd $(eval printf '../'%.0s {1..$1}); }

function pipin() { pip freeze | grep $1; }

alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias mklink='ln -s'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gg='cd ~/Git'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"

export rc=/home/$USERNAME/.bashrc
@@ -1,12 +0,0 @@
alias powerline='/opt/powerline/bin/powerline'
alias powerline-config='/opt/powerline/bin/powerline-config'
alias powerline-daemon='/opt/powerline/bin/powerline-daemon'
alias powerline-lint='/opt/powerline/bin/powerline-lint'
alias powerline-render='/opt/powerline/bin/powerline-render'

if [ -z ${DISABLE_POWERLINE} ]; then
  powerline-daemon -q
  POWERLINE_BASH_CONTINUATION=1
  POWERLINE_BASH_SELECT=1
  source /opt/powerline/powerline.sh
fi
@@ -1,153 +0,0 @@
_powerline_columns_fallback() {
  if which stty &>/dev/null ; then
    local cols="$(stty size 2>/dev/null)"
    if ! test -z "$cols" ; then
      echo "${cols#* }"
      return 0
    fi
  fi
  echo 0
  return 0
}

_powerline_tmux_pane() {
  echo "${TMUX_PANE:-`TMUX="$_POWERLINE_TMUX" tmux display -p "#D"`}" | \
    tr -d ' %'
}

_powerline_tmux_setenv() {
  TMUX="$_POWERLINE_TMUX" tmux setenv -g TMUX_"$1"_`_powerline_tmux_pane` "$2"
  TMUX="$_POWERLINE_TMUX" tmux refresh -S
}

_powerline_tmux_set_pwd() {
  if test "$_POWERLINE_SAVED_PWD" != "$PWD" ; then
    _POWERLINE_SAVED_PWD="$PWD"
    _powerline_tmux_setenv PWD "$PWD"
  fi
}

_powerline_return() {
  return $1
}

_POWERLINE_HAS_PIPESTATUS="$(
  _powerline_return 0 | _powerline_return 43
  test "${PIPESTATUS[*]}" = "0 43"
  echo "$?"
)"

_powerline_has_pipestatus() {
  return $_POWERLINE_HAS_PIPESTATUS
}

_powerline_status_wrapper() {
  local last_exit_code=$? last_pipe_status=( "${PIPESTATUS[@]}" )

  if ! _powerline_has_pipestatus \
     || test "${#last_pipe_status[@]}" -eq "0" \
     || test "$last_exit_code" != "${last_pipe_status[$(( ${#last_pipe_status[@]} - 1 ))]}" ; then
    last_pipe_status=()
  fi
  "$@" $last_exit_code "${last_pipe_status[*]}"
  return $last_exit_code
}

_powerline_add_status_wrapped_command() {
  local action="$1" ; shift
  local cmd="$1" ; shift
  full_cmd="_powerline_status_wrapper $cmd"
  if test "$action" = "append" ; then
    PROMPT_COMMAND="$PROMPT_COMMAND"$'\n'"$full_cmd"
  else
    PROMPT_COMMAND="$full_cmd"$'\n'"$PROMPT_COMMAND"
  fi
}

_powerline_tmux_set_columns() {
  _powerline_tmux_setenv COLUMNS "${COLUMNS:-`_powerline_columns_fallback`}"
}

_powerline_init_tmux_support() {
  if test -n "$TMUX" && tmux refresh -S &>/dev/null ; then
    # TMUX variable may be unset to create new tmux session inside this one
    _POWERLINE_TMUX="$TMUX"

    trap '_powerline_tmux_set_columns' WINCH
    _powerline_tmux_set_columns

    test "$PROMPT_COMMAND" != "${PROMPT_COMMAND/_powerline_tmux_set_pwd}" \
      || _powerline_add_status_wrapped_command append _powerline_tmux_set_pwd
  fi
}

_powerline_local_prompt() {
  # Arguments:
  # 1: side
  # 2: renderer_module arg
  # 3: last_exit_code
  # 4: last_pipe_status
  # 5: jobnum
  # 6: local theme
  "$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
    $2 \
    --last-exit-code=$3 \
    --last-pipe-status="$4" \
    --jobnum=$5 \
    --renderer-arg="client_id=$$" \
    --renderer-arg="local_theme=$6"
}

_powerline_prompt() {
  # Arguments:
  # 1: side
  # 2: last_exit_code
  # 3: last_pipe_status
  # 4: jobnum
  "$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
    --width="${COLUMNS:-$(_powerline_columns_fallback)}" \
    -r.bash \
    --last-exit-code=$2 \
    --last-pipe-status="$3" \
    --jobnum=$4 \
    --renderer-arg="client_id=$$"
}

_powerline_set_prompt() {
  local last_exit_code=$1 ; shift
  local last_pipe_status=$1 ; shift
  local jobnum="$(jobs -p|wc -l)"
  PS1="$(_powerline_prompt aboveleft $last_exit_code "$last_pipe_status" $jobnum)"
  if test -n "$POWERLINE_SHELL_CONTINUATION$POWERLINE_BASH_CONTINUATION" ; then
    PS2="$(_powerline_local_prompt left -r.bash $last_exit_code "$last_pipe_status" $jobnum continuation)"
  fi
  if test -n "$POWERLINE_SHELL_SELECT$POWERLINE_BASH_SELECT" ; then
    PS3="$(_powerline_local_prompt left '' $last_exit_code "$last_pipe_status" $jobnum select)"
  fi
}

_powerline_setup_prompt() {
  VIRTUAL_ENV_DISABLE_PROMPT=1
  if test -z "${POWERLINE_COMMAND}" ; then
    POWERLINE_COMMAND="$("$POWERLINE_CONFIG_COMMAND" shell command)"
  fi
  test "$PROMPT_COMMAND" != "${PROMPT_COMMAND%_powerline_set_prompt*}" \
    || _powerline_add_status_wrapped_command prepend _powerline_set_prompt
  PS2="$(_powerline_local_prompt left -r.bash 0 0 0 continuation)"
  PS3="$(_powerline_local_prompt left '' 0 0 0 select)"
}

if test -z "${POWERLINE_CONFIG_COMMAND}" ; then
  if which powerline-config >/dev/null ; then
    POWERLINE_CONFIG_COMMAND=powerline-config
  else
    POWERLINE_CONFIG_COMMAND="$(dirname "$BASH_SOURCE")/../../../scripts/powerline-config"
  fi
fi

if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses prompt ; then
  _powerline_setup_prompt
fi
if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses tmux ; then
  _powerline_init_tmux_support
fi
@@ -1,53 +0,0 @@
{
  "common": {
    "term_truecolor": false
  },
  "ext": {
    "ipython": {
      "colorscheme": "default",
      "theme": "in",
      "local_themes": {
        "rewrite": "rewrite",
        "out": "out",
        "in2": "in2"
      }
    },
    "pdb": {
      "colorscheme": "default",
      "theme": "default"
    },
    "shell": {
      "colorscheme": "default",
      "theme": "default_leftonly",
      "local_themes": {
        "continuation": "continuation",
        "select": "select"
      }
    },
    "tmux": {
      "colorscheme": "default",
      "theme": "default"
    },
    "vim": {
      "colorscheme": "default",
      "theme": "default",
      "local_themes": {
        "__tabline__": "tabline",

        "cmdwin": "cmdwin",
        "help": "help",
        "quickfix": "quickfix",

        "powerline.matchers.vim.plugin.nerdtree.nerdtree": "plugin_nerdtree",
        "powerline.matchers.vim.plugin.commandt.commandt": "plugin_commandt",
        "powerline.matchers.vim.plugin.gundo.gundo": "plugin_gundo",
        "powerline.matchers.vim.plugin.gundo.gundo_preview": "plugin_gundo-preview"
      }
    },
    "wm": {
      "colorscheme": "default",
      "theme": "default",
      "update_interval": 2
    }
  }
}
117
playbooks/initialize.yml
Normal file
@@ -0,0 +1,117 @@
---
- name: Bootstrap remote ansible environment
  hosts: all
  tags:
    - always
  vars:
    # Set this fact to allow the bootstrap play to run using the native system python
    # interpreter. A variable defined here is only in scope while this specific play
    # is being run; once this play is done this value is dropped and the default value
    # (which is actually set in the inventory file to the interpreter created by this
    # play) will be used.
    ansible_python_interpreter: /usr/bin/python3
  tasks:
    - name: Determine runtime settings
      set_fact:
        _runtime_clean: "{{ true if (clean | bool) else false }}"
        _runtime_update: "{{ true if (update | bool) else false }}"
        _runtime_update_state: "{{ 'latest' if (update | bool) else 'present' }}"

    - name: Clean bootstrap virtualenv
      when: _runtime_clean
      become: true
      file:
        path: "{{ omni_ansible_venv }}"
        state: absent

    - name: Create bootstrap virtualenv directory
      become: true
      file:
        path: "{{ omni_ansible_venv }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0755

    - name: Create bootstrap virtualenv
      command:
        cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
        creates: "{{ omni_ansible_venv }}/bin/python"

    - name: Generate remote requirements file locally
      delegate_to: 127.0.0.1
      command:
        cmd: poetry export --format requirements.txt
      changed_when: false
      register: _poetry_requirements

    - name: Copy remote requirements file
      blockinfile:
        path: "{{ omni_ansible_venv }}/req.txt"
        create: true
        block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
        mode: 0644

    - name: Install remote requirements
      pip:
        executable: "{{ omni_ansible_venv }}/bin/pip"
        requirements: "{{ omni_ansible_venv }}/req.txt"
        state: present

    - name: Install CentOS 8 python bindings
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      become: true
      dnf:
        state: "{{ _runtime_update_state }}"
        name:
          - python3-libselinux
          - python3-policycoreutils
          - python3-firewall

    - name: Install CentOS 7 python bindings
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
      become: true
      yum:
        state: "{{ _runtime_update_state }}"
        name:
          - libselinux-python
          - policycoreutils-python
          - python-firewall

    - name: Install Fedora python bindings
      when: ansible_distribution == "Fedora"
      become: true
      dnf:
        state: "{{ _runtime_update_state }}"
        name:
          - libselinux-python
          - policycoreutils-python
          - python3-firewall


- name: Check meta environment
  hosts: all
  tags:
    - always
  tasks:
    - name: Check required operating system
      when: omni_os is defined
      assert:
        that:
          - omni_os.name == ansible_distribution | lower
          - omni_os.version_major == ansible_distribution_major_version
        fail_msg: >-
          Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}',
          expected '{{ omni_os.name }} {{ omni_os.version_major }}'
        success_msg: >-
          Remote is running expected OS '{{ ansible_distribution }}
          {{ ansible_distribution_major_version }}'

    - name: Check required interpreter settings
      assert:
        that:
          - ansible_python_interpreter.startswith(omni_ansible_venv) is true
        fail_msg: >-
          Interpreter '{{ ansible_python_interpreter }}'
          is not in the expected venv '{{ omni_ansible_venv }}'
        success_msg: Interpreter '{{ ansible_python_interpreter }}' is in the expected venv
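The interpreter handoff described in the comments above only works if the inventory points `ansible_python_interpreter` into the venv this play creates. A minimal sketch of the corresponding inventory variables; the venv path here is illustrative:

omni_ansible_venv: /opt/ansible-venv
ansible_python_interpreter: "{{ omni_ansible_venv }}/bin/python"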
@@ -1,72 +0,0 @@
---
- name: Bootstrap remote ansible environment
  hosts: all
  gather_facts: false
  become: true
  tags:
    - always
    - meta
  vars:
    ansible_python_interpreter: /usr/bin/python3
  tasks:
    - name: Clean bootstrap virtualenv
      when: omni_force_reinstall is defined
      file:
        path: "{{ omni_ansible_venv }}"
        state: absent

    - name: Create bootstrap virtualenv
      command:
        cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
        creates: "{{ omni_ansible_venv }}/bin/python"

    - name: Generate remote requirements file locally
      become: false
      delegate_to: 127.0.0.1
      command:
        cmd: poetry export --format requirements.txt
      changed_when: false
      register: _poetry_requirements

    - name: Copy remote requirements file
      blockinfile:
        path: "{{ omni_ansible_venv }}/req.txt"
        create: true
        block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"

    - name: Install remote requirements
      pip:
        executable: "{{ omni_ansible_venv }}/bin/pip"
        requirements: "{{ omni_ansible_venv }}/req.txt"
        state: present

    - name: Assign ownership of the virtualenv to ansible
      file:
        path: "{{ omni_ansible_venv }}"
        state: directory
        owner: ansible
        group: ansible
        recurse: true
        follow: false

- name: Check meta environment
  hosts: all
  tags:
    - always
    - meta
  tasks:
    - name: Check required operating system
      when: omni_os is defined
      assert:
        that:
          - omni_os.name == ansible_distribution | lower
          - omni_os.version_major == ansible_distribution_major_version
        fail_msg: "Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}', expected '{{ omni_os.name }} {{ omni_os.version_major }}'"
        success_msg: "Remote is running expected OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}'"

    - name: Check required interpreter settings
      assert:
        that:
          - ansible_python_interpreter.startswith(omni_ansible_venv) is true
        fail_msg: "Interpreter '{{ ansible_python_interpreter }}' is not in the expected venv '{{ omni_ansible_venv }}'"
        success_msg: "Interpreter '{{ ansible_python_interpreter }}' is in the expected venv"
29
playbooks/provision-common.yml
Normal file
@@ -0,0 +1,29 @@
---
- import_playbook: initialize.yml


- name: Configure system settings
  hosts: all
  vars_files:
    - vars/packages.yaml
  pre_tasks:
    - import_tasks: tasks/centos-8-kernelplus.yml
  tasks:
    - import_tasks: tasks/packages/clean.yml
      when: _runtime_clean is true

    - import_tasks: tasks/packages/repos.yml

    - import_tasks: tasks/packages/update.yml
      when: _runtime_update is true

    - import_tasks: tasks/packages/install.yml


- import_playbook: configure-network.yml


- import_playbook: configure-mgmt.yml


- import_playbook: configure-env.yml
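The imported package tasks read their lists from vars/packages.yaml, which is not shown in this diff. Purely as a hypothetical sketch (the variable names below are assumptions, not taken from the repo), such a file might look like:

omni_packages_install:    # hypothetical variable name
  - vim-enhanced
  - tmux
omni_packages_remove:     # hypothetical variable name
  - cockpit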
2
playbooks/provision-datastore.yml
Normal file
@@ -0,0 +1,2 @@
---
# TBW
61
playbooks/provision-swarm.yml
Normal file
@@ -0,0 +1,61 @@
---
# TBW

# - import_playbook: provision-common.yml
#
#
# - name: Install and start Docker
#   hosts: virtualization
#   tasks:
#     - import_tasks: tasks/docker/install.yml
#
#     - name: Start and enable docker service
#       become: true
#       systemd:
#         name: docker
#         state: started
#         enabled: yes
#
#     - name: Allow swarm traffic through the firewall
#       become: true
#       firewalld:
#         zone: trusted
#         interface: "{{ item.key }}"
#         permanent: true
#         state: enabled
#
#
# - name: Configure swarm master
#   hosts: "{{ omni_docker_swarm_manager }}"
#   tasks:
#     - name: Initialize swarm
#       docker_swarm:
#         state: present
#         advertise_addr: "{{ omni_docker_swarm_iface }}"
#
#     - name: Set swarm master to DRAIN
#       docker_node:
#         hostname: "{{ ansible_host }}"
#         availability: drain
#
# - name: Configure swarm nodes
#   hosts:
#     - remus
#     - romulus
#   tags: docker-nodes
#   tasks:
#     - name: Fetch docker swarm information
#       delegate_to: jupiter
#       docker_swarm_info:
#       register: _swarm_info
#
#     - name: Join workers to swarm
#       docker_swarm:
#         state: join
#         remote_addrs: ["jupiter.svr.local"]
#         join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
#         advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# # docker plugin install --alias glusterfs trajano/glusterfs-volume-plugin:v2.0.3 --grant-all-permissions --disable
# # docker plugin set glusterfs SERVERS=jupiter.svr.local,remus.svr.local,romulus.svr.local
# # docker plugin enable glusterfs
16
playbooks/provision.yml
Normal file
@@ -0,0 +1,16 @@
---
# First: meta setup. Check everything is as we expect and that we have a remote
# venv with required dependencies
- import_playbook: initialize.yml

# Second: initial setup. Enforces the system to a "known good" state that we can
# work with
- import_playbook: provision-common.yml

# Third: setup the datastore. Lots of downstream stuff won't work without the ability
# to mount data storage
- import_playbook: provision-datastore.yml

# Finally: setup the docker swarm. Configures the workers, security, web proxy, and
# management system. Once done, applications are ready for deployment
- import_playbook: provision-swarm.yml
1
playbooks/templates
Symbolic link
@@ -0,0 +1 @@
../resources
@@ -1,57 +0,0 @@
---
# - hosts: vm-host-plex.net.enp.one
#   #gather_facts: false
#   tasks:
#     - name: Query plex API (shhh) to load latest releases
#       get_url:
#         url: https://plex.tv/api/downloads/5.json
#         dest: "{{ plex_releases_file | default('/tmp/plexreleases.json') }}"

- hosts: plex
  name: Update Plex Media Server to latest version
  vars:
    plex_releases: "{{ lookup('url', 'https://plex.tv/api/downloads/5.json') | from_json }}"
  tasks:
    - name: Identify the proper release file
      when: (ansible_os_family | lower == item["distro"]) and (ansible_distribution | lower in item["label"] | lower) and (ansible_userspace_bits in item["label"])
      set_fact:
        plex_release_url: "{{ item.url }}"
        plex_release_checksum: "{{ item.checksum }}"
      loop: "{{ plex_releases['computer']['Linux']['releases'] }}"

    - name: Download package
      get_url:
        url: "{{ plex_release_url }}"
        checksum: sha1:{{ plex_release_checksum }}
        dest: /tmp/plexmediaserver-{{ plex_release_checksum }}.{{ plex_release_url.split(".")[-1] }}

    - name: Stop the PMS service
      become: true
      systemd:
        name: "{{ plex_service | default('plexmediaserver') }}"
        state: stopped

    - name: Install update package
      become: true
      block:
        - name: Install update package using DNF
          when: ansible_distribution == "Fedora"
          dnf:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
            state: latest
        - name: Install update package using YUM
          when: ansible_distribution == "CentOS"
          yum:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
            state: latest
        - name: Install update package using APT
          when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
          apt:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.deb
            state: latest

    - name: Start the PMS service
      become: true
      systemd:
        name: "{{ plex_service | default('plexmediaserver') }}"
        state: started
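The release-selection task above walks the JSON feed from plex.tv. Judging only by the keys this play reads (distro, label, url, checksum), each release entry looks roughly like the sketch below; the values are illustrative:

computer:
  Linux:
    releases:
      - distro: redhat
        label: CentOS 7 64-bit
        url: https://downloads.plex.tv/plex-media-server/.../plexmediaserver.x86_64.rpm
        checksum: 0a1b2c3d4e5f60718293a4b5c6d7e8f901234567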
1463
poetry.lock
generated
File diff suppressed because it is too large
@@ -2,22 +2,23 @@
 name = "omni-ansible"
 version = "0.0.0"
 description = "Network deployment procedures and configuration state"
-authors = ["Ethan Paul <e@enp.one>"]
+authors = ["Ethan Paul <me@enp.one>"]
 license = "MIT"

 [tool.poetry.dependencies]
 python = "^3.7"
 ansible = "^2.9.4"
-paramiko = "^2.7.1"
 jinja2 = "^2.11.1"
 docker = "^4.2.0"
 docker-compose = "^1.25.4"
+paramiko = "^2.7.1"
+jsondiff = "^1.2.0"

 [tool.poetry.dev-dependencies]
 ansible-lint = "^4.2.0"
-yamllint = "^1.20.0"
 ansible-toolbox = "^0.3"
-
-[build-system]
-requires = ["poetry>=1.0.0"]
-build-backend = "poetry.masonry.api"
+pre-commit = "^2.9.2"
+pre-commit-hooks = "^3.3.0"
+safety = "^1.9.0"
+tox = "^3.20.1"
+tox-poetry-installer = "^0.5.2"
+yamllint = "^1.20.0"
57
resources/atom-config.cson
Normal file
@@ -0,0 +1,57 @@
"*":
  "autocomplete-python":
    useKite: false
  core:
    disabledPackages: [
      "about"
      "background-tips"
      "github"
      "image-view"
      "metrics"
      "open-on-github"
    ]
    telemetryConsent: "no"
    themes: [
      "one-dark-ui"
      "base16-tomorrow-dark-theme"
    ]
  editor:
    fontSize: 16
    invisibles: {}
    preferredLineLength: 100
  "exception-reporting":
    userId: "21f90c70-b680-4a55-a906-c8d67e98bf28"
  "ide-python":
    pylsPlugins:
      flake8:
        ignore: [
          "E121"
          "E123"
          "E126"
          "E226"
          "E24"
          "E704"
          "W503"
          "W504"
          "E501"
        ]
      pycodestyle:
        ignore: [
          "E121"
          "E123"
          "E126"
          "E226"
          "E24"
          "E704"
          "W503"
          "E501"
        ]
        maxLineLength: 100
      pyflakes: {}
      pylint:
        enabled: true
      rope_completion: {}
    python: "python3.7"
  "tree-view": {}
  welcome:
    showOnStartup: false
4
resources/bash/aliases-workstation.sh
Normal file
@@ -0,0 +1,4 @@
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias gg='cd ~/Git'
alias explorer='nautilus'
12
resources/bash/aliases.sh
Normal file
@@ -0,0 +1,12 @@
alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"
7
resources/bash/global.sh
Normal file
@@ -0,0 +1,7 @@
function _parse_git_branch() {
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
export rc=/home/$USERNAME/.bashrc
export VIRTUALENV_DIR=/home/$USERNAME/.venvs
18
resources/bash/helpers.sh
Normal file
@@ -0,0 +1,18 @@
random() {
  if [[ $# -eq 0 ]]; then
    num=32
  else
    num=$1
  fi
  cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $num | head -n 1
}

function up() { cd $(eval printf '../'%.0s {1..$1}); }

function pipin() { pip freeze | grep $1; }

function passhash() {
  read -sp 'Password: ' tmppass;
  echo $tmppass | python3 -c 'import crypt; print(crypt.crypt(input(), crypt.mksalt(crypt.METHOD_SHA512)));';
  unset tmppass;
}
76
resources/bash/pyenv.sh
Normal file
@@ -0,0 +1,76 @@
#!/usr/bin/env bash

function pyenv () {
  usage="Custom Python virtualenv manager
pyenv [list, delete, load, new] [VENV]
Commands:
  list                List existing virtualenvs (alias: 'ls')
  load VENV           Activate the virtualenv named VENV (alias: 'source')
  new VENV [VERSION]  Create and load a new virtualenv named VENV. Optionally VERSION
                      can be a python version to use for creating the venv. Note that
                      only python3 versions are supported.
  delete VENV         Delete the virtualenv named VENV (alias: 'rm')";

  if [ $# -eq 0 ]; then
    echo "Error: no command specified" >&2;
    echo "$usage";
    return 1;
  fi;

  case $1 in
    "-h"| "--help")
      echo "$usage";
      return 0;;
    "ls"| "list")
      lsvenv "$VIRTUALENV_DIR";;
    "rm"| "delete")
      if [ $# -ne 2 ]; then
        echo "Error: no virtualenv specified" >&2;
        return 1;
      fi;
      rm --recursive --force "${VIRTUALENV_DIR:?}/$2";;
    "source" | "load")
      if [ $# -ne 2 ]; then
        echo "Error: no virtualenv specified" >&2;
        return 1;
      fi;
      # shellcheck source=/dev/null
      source "$VIRTUALENV_DIR/$2/bin/activate";;
    "new")
      if [ $# -lt 2 ]; then
        echo "Error: no virtualenv specified" >&2;
        return 1;
      fi;
      if [ $# -eq 3 ]; then
        version="$3";
      else
        version="3";
      fi
      if ! command -v "python$version" &>/dev/null; then
        echo "Error: no interpreter found for python version '$version'" >&2;
        return 2;
      fi

      if python$version -m venv "$VIRTUALENV_DIR/$2"; then
        echo "New virtualenv '$2' created using $(command -v python$version)" >&2;
        # shellcheck source=/dev/null
        source "$VIRTUALENV_DIR/$2/bin/activate"
      else
        return $?;
      fi;;
    *)
      echo "Error: unknown command '$1'" >&2;
      echo "$usage";
      return 1;;
  esac
}

function lsvenv () {
  venvs=()
  for item in $(/usr/bin/ls -d "$1"/*/); do
    if stat "${item}/bin/activate" &>/dev/null; then
      venvs+=("$(basename "$item")");
    fi
  done
  echo "${venvs[*]}"
}
20
resources/bash/setup-atom.sh
Normal file
@@ -0,0 +1,20 @@
curl -o atom.rpm https://github.com/atom/atom/releases/download/v1.53.0/atom.x86_64.rpm
dnf install atom.rpm

python3.7 -m pip install \
  python-language-server[all]==0.21.5 \
  parso==0.5.2 \
  jedi==0.15.2

apm install \
  atom-ide-ui@0.13.0 \
  atom-jinja2@0.6.0 \
  atom-typescript@14.1.2 \
  autocomplete-python@1.16.0 \
  ide-python@1.6.2 \
  ide-typescript@0.9.1 \
  language-docker \
  language-ini \
  language-restructuredtext \
  language-rpm-spec \
  minimap
257
resources/docker-compose/bitwarden.yaml.j2
Normal file
@@ -0,0 +1,257 @@
---
version: "{{ omni_compose_version | string }}"


x-global-env: &globalenv
  LOCAL_UID: "{{ omni_compose_apps.bitwarden.account.uid | string }}"
  LOCAL_GID: "{{ omni_compose_apps.bitwarden.account.uid | string }}"
  ASPNETCORE_ENVIRONMENT: Production
  globalSettings__selfHosted: "true"
  globalSettings__baseServiceUri__vault: https://{{ omni_compose_apps.bitwarden.published.host }}
  globalSettings__baseServiceUri__api: https://{{ omni_compose_apps.bitwarden.published.host }}/api
  globalSettings__baseServiceUri__identity: https://{{ omni_compose_apps.bitwarden.published.host }}/identity
  globalSettings__baseServiceUri__admin: https://{{ omni_compose_apps.bitwarden.published.host }}/admin
  globalSettings__baseServiceUri__notifications: https://{{ omni_compose_apps.bitwarden.published.host }}/notifications
  globalSettings__baseServiceUri__internalNotifications: http://bitwarden_notifications:5000
  globalSettings__baseServiceUri__internalAdmin: http://bitwarden_admin:5000
  globalSettings__baseServiceUri__internalIdentity: http://bitwarden_identity:5000
  globalSettings__baseServiceUri__internalApi: http://bitwarden_api:5000
  globalSettings__baseServiceUri__internalVault: http://bitwarden_web:5000
  globalSettings__pushRelayBaseUri: https://push.bitwarden.com
  globalSettings__installation__identityUri: https://identity.bitwarden.com
  globalSettings__sqlServer__connectionString: "Data Source=tcp:mssql,1433;Initial Catalog=vault;Persist Security Info=False;User ID=sa;Password=e934c0bb-3b5a-4e6b-b525-cd6d83004e1a;MultipleActiveResultSets=False;Connect Timeout=30;Encrypt=True;TrustServerCertificate=True"
  globalSettings__identityServer__certificatePassword: {{ omni_compose_app_secrets.bitwarden.identity_server_certificate_password }}
  globalSettings__attachment__baseDirectory: /etc/bitwarden/core/attachments
  globalSettings__attachment__baseUrl: https://{{ omni_compose_apps.bitwarden.published.host }}/attachments
  globalSettings__dataProtection__directory: /etc/bitwarden/core/aspnet-dataprotection
  globalSettings__logDirectory: /etc/bitwarden/logs
  globalSettings__licenseDirectory: /etc/bitwarden/core/licenses
  globalSettings__internalIdentityKey: {{ omni_compose_app_secrets.bitwarden.internal_identity_key }}
  globalSettings__duo__aKey: {{ omni_compose_app_secrets.bitwarden.duo_akey }}
  globalSettings__installation__id: {{ omni_compose_app_secrets.bitwarden.installation_id }}
  globalSettings__installation__key: {{ omni_compose_app_secrets.bitwarden.installation_key }}
  globalSettings__yubico__clientId: REPLACE
  globalSettings__yubico__key: REPLACE
  globalSettings__mail__replyToEmail: noreply@enp.one
  globalSettings__mail__smtp__host: REPLACE
  globalSettings__mail__smtp__port: "587"
  globalSettings__mail__smtp__ssl: "false"
  globalSettings__mail__smtp__username: REPLACE
  globalSettings__mail__smtp__password: REPLACE
  globalSettings__disableUserRegistration: "false"
  globalSettings__hibpApiKey: REPLACE
  adminSettings__admins: ""


volumes:
  bitwarden-db-data:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/mssql/data
    driver: glusterfs
  bitwarden-db-backup:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/mssql/backup
  bitwarden-nginx-data:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/nginx
    driver: glusterfs
  bitwarden-web:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/web
    driver: glusterfs
  bitwarden-ssl:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/ssl
    driver: glusterfs
  bitwarden-ca-certs:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/ca-certificates
    driver: glusterfs
  bitwarden-core:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/core
    driver: glusterfs
  bitwarden-identity:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/identity
    driver: glusterfs
  bitwarden-logs-api:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/api
    driver: glusterfs
  bitwarden-logs-db:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/mssql
    driver: glusterfs
  bitwarden-logs-identity:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/identity
    driver: glusterfs
  bitwarden-logs-nginx:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/nginx
    driver: glusterfs
  bitwarden-logs-admin:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/admin
    driver: glusterfs
  bitwarden-logs-icons:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/icons
    driver: glusterfs
  bitwarden-logs-notifications:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/notifications
    driver: glusterfs
  bitwarden-logs-events:
    name: datastore{{ omni_compose_apps.bitwarden.datastore }}/logs/events
    driver: glusterfs


networks:
  bitwarden_internal:
    internal: true
    name: bitwarden_internal
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.bitwarden.networks.internal }}
  bitwarden_external:
    internal: false
    name: bitwarden_external
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.bitwarden.networks.external }}


services:
  mssql:
    image: bitwarden/mssql:{{ omni_compose_apps.bitwarden.versions.mssql | default(omni_compose_apps.bitwarden.versions.default) }}
    stop_grace_period: 60s
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-db-data:/var/opt/mssql/data
      - bitwarden-db-backup:/etc/bitwarden/mssql/backups
      - bitwarden-logs-db:/var/opt/mssql/log
    environment:
      LOCAL_UID: "{{ omni_compose_apps.bitwarden.account.uid | string }}"
      LOCAL_GID: "{{ omni_compose_apps.bitwarden.account.uid | string }}"
      ACCEPT_EULA: "Y"
      MSSQL_PID: Express
      SA_PASSWORD: {{ omni_compose_app_secrets.bitwarden.mssql_sa_password }}
    deploy:
      replicas: 1

  web:
    image: bitwarden/web:{{ omni_compose_apps.bitwarden.versions.web | default(omni_compose_apps.bitwarden.versions.default) }}
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-web:/etc/bitwarden/web
    environment: *globalenv
    deploy:
      replicas: 1

  attachments:
    image: bitwarden/attachments:{{ omni_compose_apps.bitwarden.versions.attachments | default(omni_compose_apps.bitwarden.versions.default) }}
    networks:
      - bitwarden_internal
    volumes:
      - bitwarden-core:/etc/bitwarden/core
    environment: *globalenv
    deploy:
      replicas: 1

  api:
    image: bitwarden/api:{{ omni_compose_apps.bitwarden.versions.api | default(omni_compose_apps.bitwarden.versions.default) }}
    volumes:
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-api:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  identity:
    image: bitwarden/identity:{{ omni_compose_apps.bitwarden.versions.identity | default(omni_compose_apps.bitwarden.versions.default) }}
    volumes:
      - bitwarden-identity:/etc/bitwarden/identity
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-identity:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  admin:
    image: bitwarden/admin:{{ omni_compose_apps.bitwarden.versions.admin | default(omni_compose_apps.bitwarden.versions.default) }}
    depends_on:
      - mssql
    volumes:
      - bitwarden-core:/etc/bitwarden/core
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-admin:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  icons:
    image: bitwarden/icons:{{ omni_compose_apps.bitwarden.versions.icons | default(omni_compose_apps.bitwarden.versions.default) }}
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-icons:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  notifications:
    image: bitwarden/notifications:{{ omni_compose_apps.bitwarden.versions.notifications | default(omni_compose_apps.bitwarden.versions.default) }}
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-notifications:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  events:
    image: bitwarden/events:{{ omni_compose_apps.bitwarden.versions.events | default(omni_compose_apps.bitwarden.versions.default) }}
    volumes:
      - bitwarden-ca-certs:/etc/bitwarden/ca-certificates
      - bitwarden-logs-events:/etc/bitwarden/logs
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1

  nginx:
    image: bitwarden/nginx:{{ omni_compose_apps.bitwarden.versions.nginx | default(omni_compose_apps.bitwarden.versions.default) }}
    depends_on:
      - web
      - admin
      - api
      - identity
    ports:
      - published: {{ omni_compose_apps.bitwarden.published.ports.8080 }}
        target: 8080
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.bitwarden.published.ports.8443 }}
        target: 8443
        protocol: tcp
        mode: ingress
    volumes:
      - bitwarden-nginx-data:/etc/bitwarden/nginx
      - bitwarden-ssl:/etc/ssl
      - bitwarden-logs-nginx:/var/log/nginx
    environment: *globalenv
    networks:
      - bitwarden_external
      - bitwarden_internal
    deploy:
      replicas: 1
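Alongside `omni_compose_apps`, this template reads secrets from an `omni_compose_app_secrets` structure (loaded from vars/secrets/applications.yaml earlier in this diff). Based on the keys referenced above, the bitwarden entry carries at least the following; the values are placeholders:

omni_compose_app_secrets:
  bitwarden:
    identity_server_certificate_password: "<secret>"
    internal_identity_key: "<secret>"
    duo_akey: "<secret>"
    installation_id: "<secret>"
    installation_key: "<secret>"
    mssql_sa_password: "<secret>"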
51
resources/docker-compose/gitea.yaml.j2
Normal file
@@ -0,0 +1,51 @@
---
version: "{{ omni_compose_version | string }}"


networks:
  gitea:
    name: gitea
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.gitea.networks.main }}


volumes:
  gitea-data:
    name: datastore{{ omni_compose_apps.gitea.datastore }}
    driver: glusterfs


services:
  server:
    image: gitea/gitea:{{ omni_compose_apps.gitea.versions.gitea | default(omni_compose_apps.gitea.versions.default) }}
    hostname: gitea
    networks:
      - gitea
    ports:
      - published: {{ omni_compose_apps.gitea.published.ports.3000 }}
        target: 3000
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.gitea.published.ports.22 }}
        target: 22
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: gitea-data
        target: /data
        read_only: false
    environment:
      USER_UID: "{{ omni_compose_apps.gitea.account.uid | string }}"
      USER_GID: "{{ omni_compose_apps.gitea.account.uid | string }}"
      APP_NAME: ENP VCS
      RUN_MODE: prod
      DOMAIN: jupiter.net.enp.one
      ROOT_URL: https://{{ omni_compose_apps.gitea.published.host }}/
      DB_TYPE: sqlite3
      DISABLE_REGISTRATION: "true"
    deploy:
      replicas: 1
53
resources/docker-compose/minecraft.yaml.j2
Normal file
@@ -0,0 +1,53 @@
---
version: "{{ omni_compose_version | string }}"


networks:
  minecraft:
    name: minecraft
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.minecraft.networks.main }}


volumes:
  minecraft-data:
    name: datastore{{ omni_compose_apps.minecraft.datastore }}
    driver: glusterfs


services:
  server:
    image: itzg/minecraft-server:{{ omni_compose_apps.minecraft.versions.main }}
    hostname: minecraft
    networks:
      - minecraft
    ports:
      - published: {{ omni_compose_apps.minecraft.published.ports.25565 }}
        target: 25565
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: minecraft-data
        target: /data
        read_only: false
    environment:
      EULA: "TRUE"
      TZ: America/New_York
      VERSION: {{ omni_compose_apps.minecraft.versions.server }}
      MAX_MEMORY: "8G"
      MOTD: "A home for buttery companions"
      MODE: survival
      OPS: ScifiGeek42
      WHITELIST: "ScifiGeek42,fantasycat256,CoffeePug,Snowdude21325,KaiserSJR,glutenfreebean"
      MAX_BUILD_HEIGHT: "512"
      SNOOPER_ENABLED: "false"
      ICON: https://cdn.enp.one/img/logos/e-w-sm.png
      ENABLE_RCON: "false"
      UID: "{{ omni_compose_apps.minecraft.account.uid | string }}"
      GID: "{{ omni_compose_apps.minecraft.account.uid | string }}"
    deploy:
      replicas: 1
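Note that this template pins two distinct versions: `versions.main` tags the itzg/minecraft-server image itself, while `versions.server` is passed through the VERSION variable to select the Minecraft release inside the container. A sketch of the matching inventory fragment, with illustrative values:

omni_compose_apps:
  minecraft:
    versions:
      main: latest     # container image tag
      server: 1.15.2   # Minecraft version handed to the VERSION env var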
144
resources/docker-compose/nextcloud.yaml.j2
Normal file
@@ -0,0 +1,144 @@
---
version: "{{ omni_compose_version | string }}"


x-server-env: &server-env
  NEXTCLOUD_DATA_DIR: /data/
  NEXTCLOUD_ADMIN_USER: admin
  NEXTCLOUD_ADMIN_PASSWORD: {{ omni_compose_app_secrets.nextcloud.admin_password }}
  NEXTCLOUD_TRUSTED_DOMAINS: localhost {{ inventory_hostname }} {{ omni_compose_apps.nextcloud.published.host }}
  MYSQL_DATABASE: nextcloud
  MYSQL_USER: root
  MYSQL_PASSWORD: {{ omni_compose_app_secrets.nextcloud.database_password }}
  MYSQL_HOST: database
  REDIS_HOST: cache
  PHP_MEMORY_LIMIT: "12G"
  PHP_UPLOAD_LIMIT: "6G"
  PHP_INI_SCAN_DIR: /usr/local/etc/php/conf.d:/var/www/html/


networks:
  nextcloud:
    name: nextcloud
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.nextcloud.networks.main }}


volumes:
  database:
    name: datastore{{ omni_compose_apps.nextcloud.datastore }}/database
    driver: glusterfs
  data:
    name: datastore{{ omni_compose_apps.nextcloud.datastore }}/userdata
    driver: glusterfs
  config:
    name: datastore{{ omni_compose_apps.nextcloud.datastore }}/config
    driver: glusterfs
  proxy:
    name: datastore{{ omni_compose_apps.nextcloud.datastore }}/proxy
    driver: glusterfs


services:
  database:
    image: mariadb:{{ omni_compose_apps.nextcloud.versions.database | default(omni_compose_apps.nextcloud.versions.default) }}
    hostname: nextcloud-database
    networks:
      - nextcloud
    volumes:
      - type: volume
        source: database
        target: /var/lib/mysql
        read_only: false
      - type: volume
        source: proxy
        target: /etc/mysql/conf.d
        read_only: true
    environment:
      MYSQL_ROOT_PASSWORD: {{ omni_compose_app_secrets.nextcloud.database_password }}
      MYSQL_DATABASE: nextcloud
    deploy:
      replicas: 1

  cache:
    image: redis:{{ omni_compose_apps.nextcloud.versions.cache | default(omni_compose_apps.nextcloud.versions.default) }}
    hostname: nextcloud-cache
    networks:
      - nextcloud
    deploy:
      replicas: 1

  proxy:
    image: nginx:{{ omni_compose_apps.nextcloud.versions.proxy | default(omni_compose_apps.nextcloud.versions.default) }}
    hostname: nextcloud-proxy
    networks:
      - nextcloud
    depends_on:
      - server
    ports:
      - published: {{ omni_compose_apps.nextcloud.published.ports.80 }}
        target: 80
        protocol: tcp
        mode: ingress
    volumes:
      - type: volume
        source: config
        target: /usr/share/nginx/nextcloud
        read_only: true
      - type: volume
        source: proxy
        target: /etc/nginx/conf.d
        read_only: true
    deploy:
      replicas: 1

  server:
    image: nextcloud:{{ omni_compose_apps.nextcloud.versions.server | default(omni_compose_apps.nextcloud.versions.default) }}
    hostname: nextcloud-server
    user: "{{ omni_compose_apps.nextcloud.account.uid }}"
    networks:
      - nextcloud
    depends_on:
      - database
      - cache
    volumes:
      - type: volume
        source: data
        target: /data
        read_only: false
      - type: volume
        source: config
        target: /var/www/html
        read_only: false
    environment: *server-env
    deploy:
      replicas: 1

  cron:
    image: nextcloud:{{ omni_compose_apps.nextcloud.versions.server | default(omni_compose_apps.nextcloud.versions.default) }}
    command: php /var/www/html/cron.php
    hostname: nextcloud-cron
    user: "{{ omni_compose_apps.nextcloud.account.uid }}"
    networks:
      - nextcloud
    depends_on:
      - database
      - cache
    volumes:
      - type: volume
        source: data
        target: /data
        read_only: false
      - type: volume
        source: config
        target: /var/www/html
        read_only: false
    environment: *server-env
    deploy:
      replicas: 1
      restart_policy:
        condition: any
        delay: "4m"
90
resources/docker-compose/plex.yaml.j2
Normal file
@@ -0,0 +1,90 @@
---
version: "{{ omni_compose_version | string }}"


networks:
  plex:
    name: plex
    driver: overlay
    ipam:
      driver: default
      config:
        - subnet: {{ omni_compose_apps.plex.networks.main }}


volumes:
  plex-config:
    name: datastore{{ omni_compose_apps.plex.datastore }}
    driver: glusterfs
  plex-data:
    name: plex-data
    driver: local
    driver_opts:
      type: nfs
      o: "addr=plexistore.tre2.local,ro"
      device: ":/nfs/plex"
  plex-personal:
    name: datastore/media
    driver: glusterfs


services:
  server:
    image: plexinc/pms-docker:{{ omni_compose_apps.plex.versions.default }}
    hostname: plex-media-server
    networks:
      - plex
    ports:
      - published: {{ omni_compose_apps.plex.published.ports.32400 }}
        target: 32400
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.3005 }}
        target: 3005
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.8324 }}
        target: 8324
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.32469 }}
        target: 32469
        protocol: tcp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.1900 }}
        target: 1900
        protocol: udp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.32410 }}
        target: 32410
        protocol: udp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.32413 }}
        target: 32413
        protocol: udp
        mode: ingress
      - published: {{ omni_compose_apps.plex.published.ports.32414 }}
        target: 32414
        protocol: udp
        mode: ingress
    volumes:
      - type: volume
        source: plex-config
        target: /config
        read_only: false
      - type: volume
        source: plex-data
        target: /data
        read_only: true
      - type: volume
        source: plex-personal
        target: /personal
        read_only: false
    environment:
      TZ: "America/New_York"
      ALLOWED_NETWORKS: 10.42.100.0/24,10.42.101.0/24
      PLEX_UID: "{{ omni_compose_apps.plex.account.uid }}"
      PLEX_GID: "{{ omni_compose_apps.plex.account.uid }}"
      ADVERTISE_IP: "http://10.42.101.10:32400/"
    deploy:
      replicas: 1
Some files were not shown because too many files have changed in this diff