Compare commits

..

No commits in common. "1e1d4d75a03b898348ddc5468858aafba109391f" and "38ce173ad53e7531016a9b616918b160ef8c999d" have entirely different histories.

122 changed files with 2705 additions and 2106 deletions

View File

@ -1,28 +0,0 @@
---
# All of the pre-commit hooks here actually use the `python` pre-commit language
# setting. However, for the python language setting, pre-commit will create and manage
# a cached virtual environment for each hook ID and do a bare `pip install <repo>` into
# the venv to setup the hook. This can result in conflicting dependency versions between
# the version installed to the pre-commit venv and the version installed to the Poetry
# venv specified in the lockfile.
#
# The solution is to specify `language: system` for all hooks and then install the
# required dependencies to the Poetry venv. The `system` language skips the isolated
# venv creation and looks for the entrypoint specified by the hook in the global
# environment which, if running in the Poetry venv, will find the entrypoint provided
# by the Poetry-managed dependency.
#
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.3.0
    hooks:
      - id: end-of-file-fixer
        language: system
      - id: fix-encoding-pragma
        args:
          - "--remove"
        language: system
      - id: trailing-whitespace
        language: system
      - id: check-merge-conflict
        language: system

View File

@ -3,39 +3,6 @@
Network Ansible configurations Network Ansible configurations
* The `omni_*` prefix is used for custom variables defined and used internally to * The `omni_*` prefix is used for custom variables defined and used internally to
distinguish them from `ansible_*` or other variables. The `_runtime_` prefix should distinguish them from `ansible_*` or other variables
be used for runtime variables * Roles: things machines do. Tasks: how those things are done. Platform compatibility
* Passing `clean=true` should force cleaning any and all cached stuff should be handled in tasks. Config logic should be handled in roles.
* Passing `update=true` should update any unpinned _things_ to their latest version
Organizational notes:
* Playbooks should be platform/device agnostic. Any playbook should be runnable against
any device. If the config a playbook deploys isn't applicable to that device then the
playbook should be laid out so that it skips any inapplicable hosts.
* Building from that, platform-conditionals should go in task files: `when` conditions
in playbooks should be limited to runtime conditions.
Target notes:
* The `'mgmt'` target grants remote management access. This usually means SSH + local
login access, but can also mean web interface (cockpit, erx, etc)
General workflow:
1. Run `provision.yml` - this gets the entire environment into a ready-to-go state but
does not deploy any actual applications or perform any target tasks
2. Run one or more `deploy-*.yml` - this deploys the application noted to the system
3. Run one or more `do-*.yml` - this performs one off tasks
## local env creation
Requires Poetry-1.1+
```bash
git clone https://vcs.enp.one/omni/omni-ansible.git
cd omni-ansible/
poetry install
```

8
_legacy/groups/all.yml Normal file
View File

@ -0,0 +1,8 @@
---
ansible_user: ansible
protected_users:
- root
- ansible
domain: net.enp.one

12
_legacy/groups/cloud.yml Normal file
View File

@ -0,0 +1,12 @@
---
# Headless cloud hosts: no GUI, key-only SSH, passwordless sudo, systemd-networkd.
# Canonical lowercase booleans (yamllint `truthy`) instead of `True`/`False`.
enable_gui: false
enable_ssh: true
enable_ssh_password_auth: false
disable_sudo_password: true
enable_networkd: true
generate_keys: false

View File

@ -0,0 +1,12 @@
---
# Group defaults: headless host with key-only SSH; sudo still prompts for a password.
# Canonical lowercase booleans (yamllint `truthy`) instead of `True`/`False`.
enable_gui: false
enable_ssh: true
enable_ssh_password_auth: false
disable_sudo_password: false
enable_networkd: true
generate_keys: false

12
_legacy/groups/vms.yml Normal file
View File

@ -0,0 +1,12 @@
---
# VM guests: headless, key-only SSH, passwordless sudo, systemd-networkd.
# Canonical lowercase booleans (yamllint `truthy`) instead of `True`/`False`.
enable_gui: false
enable_ssh: true
enable_ssh_password_auth: false
disable_sudo_password: true
enable_networkd: true
generate_keys: false

View File

@ -0,0 +1,12 @@
---
# Workstations: GUI enabled, no inbound SSH, NetworkManager (not networkd).
# Canonical lowercase booleans (yamllint `truthy`) instead of `True`/`False`.
enable_gui: true
enable_ssh: false
enable_ssh_password_auth: false
disable_sudo_password: false
enable_networkd: false
generate_keys: false

6
_legacy/hosts/apex.yml Normal file
View File

@ -0,0 +1,6 @@
---
description: "EN1 Reverse Proxy / EN1 VPN Server"
targets:
- admin
- vpn

View File

@ -0,0 +1,8 @@
---
description: "Wandering excursion"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@ -0,0 +1,20 @@
---
description: "EN1 System Control Node"
targets:
  - admin
  - network
networking:
  eno1:
    # `Yes` is a YAML 1.1 truthy trap; use the canonical lowercase boolean
    dhcp: true
  eno2:
    addresses: ["192.168.255.10/24"]
# Example of the full per-interface schema this mapping supports:
# demo:
#   addresses: ["192.168.1.10/24", "192.168.100.10/24"]
#   dhcp: true
#   dhcp6: true
#   gateway: 192.168.1.1
#   dns: ["8.8.8.8", "8.8.4.4"]
#   vlans: ["101", "200"]

View File

@ -0,0 +1,6 @@
---
# Typo fix: "Digitial" -> "Digital"
description: "EN2 Digital Ocean Cloud Server"
targets:
  - admin
  - web

View File

@ -0,0 +1,5 @@
---
description: "EN1 Secondary Datastore"
targets:
- admin
- datastore

View File

@ -0,0 +1,6 @@
---
description: "And the Last"
targets:
- admin
- workstations

View File

@ -0,0 +1,9 @@
---
description: "EN1 Primary Datastore / EN1 Secondary Hypervisor"
targets:
- admin
- datastore
networking:
ovirtmgt:

View File

@ -0,0 +1,10 @@
---
description: "EN1 Primary Hypervisor"
targets:
- admin
- datastore
- hypervisor
networking:
ovirtmgt:

View File

@ -0,0 +1,33 @@
---
description: EN1 Core Router
ansible_network_os: edgeos
targets:
- admin
- network
network:
ethernet_eth0:
address: dhcp
description: UPLINK
extra:
- duplex auto
- speed auto
ethernet_eth1:
address: 10.42.100.1/24
description: PUBLIC
extra:
- duplex auto
- speed auto
ethernet_eth2:
address: 10.42.101.1/24
description: PRIVATE
extra:
- duplex auto
- speed auto
ethernet_eth2_vif_10:
address: 10.42.102.1/24
description: SECURE
extra:
- mtu 1500

View File

@ -0,0 +1,8 @@
---
description: "Smooth as Silk"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@ -0,0 +1,8 @@
---
description: "Watcher who Watches the Watchmen"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@ -0,0 +1,5 @@
---
description: "Database Host: MariaDB"
targets:
- admin

View File

@ -0,0 +1,5 @@
---
description: "Database Host: MySQL"
targets:
- admin

View File

@ -0,0 +1,5 @@
---
description: "Database Host: PrometheusDB"
targets:
- admin

View File

@ -0,0 +1,6 @@
---
description: "Development Host: Nginx Web Server"
targets:
- admin
- web

View File

@ -0,0 +1,9 @@
---
description: "Application Host: Bitwarden"
targets:
- admin
- bitwarden
networking:
eth0:

View File

@ -0,0 +1,9 @@
---
description: "Application Host: Gitea"
targets:
- admin
- gitea
networking:
eth0:

View File

@ -0,0 +1,9 @@
---
description: "Application Host: Minecraft"
targets:
- admin
- minecraft
networking:
eth0:

View File

@ -0,0 +1,9 @@
---
description: "Application Host: Nextcloud"
targets:
- admin
- nextcloud
networking:
eth0:

View File

@ -0,0 +1,6 @@
---
description: "Application Host: Plex Media Server"
targets:
- admin
- plex

View File

View File

@ -0,0 +1,32 @@
---
- import_playbook: dependencies.yml
- name: Setup environment
hosts: all:!network
tags:
- initialize
vars:
restart_services: true
roles:
- role: packages
vars:
update: true
exclude: [] # Override the default kernel exclusion
clean: true
- role: sshd
- role: networkd
tasks:
- name: Set hostname
become: true
hostname:
name: "{{ inventory_hostname }}"
- name: Install global bashrc
become: true
copy:
src: bashrc.sh
dest: /etc/profile.d/ZA-enpn-bashrc.sh
mode: 0644
- import_playbook: deploy-local-auth.yml
- import_playbook: deploy-sshkeys.yml

View File

View File

@ -0,0 +1,26 @@
---
- hosts: vms
  name: Replace NetworkManager with systemd-networkd
  tasks:
    - name: Install systemd-networkd
      # `| bool` handles string-typed values ("true"/"True") as well as real
      # booleans, unlike the strict `== true` comparison
      when: enable_networkd | bool
      block:
        # Platform-specific install steps; config/services are platform-agnostic
        - import_tasks: tasks/centos/networkd.yml
          when: ansible_distribution == "CentOS"
        - import_tasks: tasks/fedora/networkd.yml
          when: ansible_distribution == "Fedora"
        # - import_tasks: common/debian/networkd.yml
        #   when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
        - import_tasks: tasks/networkd/config.yml
        - import_tasks: tasks/networkd/services.yml

- hosts: vms
  name: Install ovirt agent
  tasks:
    - name: Install ovirt-agent
      become: true
      yum:
        name: ovirt-guest-agent
        state: latest

View File

@ -0,0 +1,8 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
[Match]
Name={{ item.key }}
[Network]
DHCP=Yes
# EOF

View File

@ -0,0 +1,26 @@
---
- hosts: all
  name: Upgrade packages
  tasks:
    - name: Upgrade YUM packages
      when: ansible_distribution == "CentOS"
      become: true
      yum:
        state: latest
        name: "*"
        # Append the extra exclusions (with their leading comma) only when
        # `exclude_upgrade` is defined; the old `',' + exclude_upgrade | default('')`
        # form left a dangling trailing comma ("kernel*,") when it was not
        exclude: "kernel*{{ (',' + exclude_upgrade) if exclude_upgrade is defined else '' }}"
    - name: Upgrade DNF packages
      when: ansible_distribution == "Fedora"
      become: true
      dnf:
        state: latest
        name: "*"
        exclude: "kernel*{{ (',' + exclude_upgrade) if exclude_upgrade is defined else '' }}"
    # - name: Upgrade APT packages
    #   when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
    #   become: true
    #   apt:

View File

@ -0,0 +1,132 @@
---
- import_playbook: dependencies.yml

- hosts: all:!network
  name: Update local user accounts and access controls
  tasks:
    - import_tasks: tasks/users-preprocessing.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ targets + ['omni'] }}"

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            # Only grant the groups this host actually serves, plus the shared group
            groups: "{{ item.targets | intersect(targets) + ['omni'] }}"
            system: "{{ item.svc | default(false) }}"
            state: present
            # `| bool` already yields true/false; the former
            # `True if ... == true else False` ternary was redundant
            generate_ssh_key: "{{ generate_keys | bool }}"
            ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
            # NOTE(review): ed25519 keys are fixed-size, so the previous
            # `ssh_key_bits: 4096` was ignored by ssh-keygen and has been dropped
            ssh_key_type: ed25519
            password: "{{ item.password }}"
          loop: "{{ local_users }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ local_removed_users | difference(protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ local_admin_users }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        # Quoted so YAML does not reinterpret the leading-zero octal literal
        mode: "0644"

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: "0644"
        state: "{{ 'present' if disable_sudo_password | bool else 'absent' }}"
      loop: "{{ local_admin_users }}"

    - name: Configure GNOME
      tags: users_gnome
      when: ansible_distribution == "Fedora" and disable_gnome_user_list | bool
      become: true
      block:
        - name: Configure GDM profile
          blockinfile:
            create: true
            path: /etc/dconf/profile/gdm
            block: |
              user-db:user
              system-db:gdm
              file-db:/usr/share/gdm/greeter-dconf-defaults

        - name: Configure GDM keyfile
          blockinfile:
            create: true
            path: /etc/dconf/db/gdm.d/00-login-screen
            block: |
              [org/gnome/login-screen]
              # Do not show the user list
              disable-user-list=true

        - name: Delete existing user database
          file:
            path: /var/lib/gdm/.config/dconf/user
            state: absent

        - name: Restart dconf database
          # Simple binary invocation; no shell features needed
          command: dconf update

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ local_users }}"

# - hosts: router.net.enp.one
#   name: Configure users on router
#   connection: network_cli
#   vars:
#     ansible_network_os: edgeos
#   tasks:
#     - import_tasks: tasks/users-preprocessing.yml
#
#     - name: Create users
#       edgeos_config:
#         lines:
#           - set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
#           - set system login user {{ item.name }} full-name "{{ item.fullname | default('') }}"
#           - set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
#       loop: "{{ local_users | difference([None]) }}"
#
#     - name: Grant administrative access to admin users
#       edgeos_config:
#         lines:
#           - set system login user {{ item.name }} level admin
#       loop: "{{ local_admin_users | difference([None]) }}"
#
#     - name: Assemble key files for loadkey usage
#       edgeos_command:
#         commands: sudo tee /tmp/{{ item.name }}.keys<<<"{{ item.sshkeys | join('\n') }}"
#       loop: "{{ local_admin_users | difference([None]) }}"
#
# - import_playbook: deploy-sshkeys.yml

View File

@ -0,0 +1,59 @@
---
# NOTE(review): this file contained unresolved merge conflict markers
# (`<<<<<<< Updated upstream` / `>>>>>>> Stashed changes`), which make it
# unparseable. The resolution below combines both sides: the upstream
# `gather_facts: false` (network devices cannot gather normal facts) with the
# stashed `ansible_network_os` var, and the stashed herestring form of the
# key-assembly command (the upstream form had the tee target and herestring
# content reversed) followed by the upstream loadkey step.
- hosts: router.net.enp.one
  name: Configure users on router
  connection: network_cli
  gather_facts: false
  vars:
    ansible_network_os: edgeos
  tasks:
    - import_tasks: tasks/users-preprocessing.yml

    - name: Create users
      edgeos_config:
        lines:
          - set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
          # `default('')` avoids the broken nested double quotes of the original
          # `"{{ item.fullname if item.fullname is defined else "" }}"` expression
          - set system login user {{ item.name }} full-name "{{ item.fullname | default('') }}"
          - set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
      loop: "{{ local_users | difference([None]) }}"

    - name: Grant administrative access to admin users
      edgeos_config:
        lines:
          - set system login user {{ item.name }} level admin
      loop: "{{ local_admin_users | difference([None]) }}"

    - name: Assemble key files for loadkey usage
      edgeos_command:
        commands: sudo tee /tmp/{{ item.name }}.keys<<<"{{ item.sshkeys | join('\n') }}"
      loop: "{{ local_admin_users | difference([None]) }}"

    - name: Load keys
      edgeos_config:
        lines:
          # Loop items are user dicts, so reference `.name` (the upstream version
          # used the bare dict `{{ item }}` in the path, which cannot be right)
          - loadkey {{ item.name }} /tmp/{{ item.name }}.keys
      loop: "{{ local_admin_users | difference([None]) }}"

View File

@ -0,0 +1,5 @@
---
- import_playbook: dependencies.yml
- import_playbook: update-system.yml
- import_playbook: update-users-local.yml

View File

@ -0,0 +1,12 @@
---
- name: Install required packages
  when: ansible_distribution == "Fedora" or (ansible_distribution == "CentOS" and ansible_distribution_major_version == "8")
  become: true
  dnf:
    state: latest
    name:
      - openldap-servers
      - openldap-clients
      - nss-pam-ldapd

# TODO(review): the configuration task was left unfinished — a bare
# `- name: Configure` entry with no module is invalid YAML for Ansible,
# so it is commented out until it is actually implemented.
# - name: Configure

View File

@ -0,0 +1,36 @@
---
- name: Install Ovirt on CentOS 8
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  block:
    - name: Install Ovirt repository
      dnf:
        state: latest
        # HTTPS so the release RPM cannot be tampered with in transit
        name: https://resources.ovirt.org/pub/yum-repo/ovirt-release43.rpm
    - name: Update using the new repository
      dnf:
        state: latest
        name: "*"
        exclude: "kernel*"
    - name: Install Ovirt
      dnf:
        state: latest
        name: ovirt-engine

# Typo fix: "Ovrit" -> "Ovirt"
- name: Install Ovirt on CentOS 7
  become: true
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
  block:
    - name: Install Ovirt repository
      yum:
        state: latest
        name: https://resources.ovirt.org/pub/yum-repo/ovirt-release43.rpm
    - name: Update using the new repository
      yum:
        state: latest
        name: "*"
        exclude: "kernel*"
    - name: Install Ovirt
      yum:
        state: latest
        name: ovirt-engine

0
_legacy/tasks/centos.yml Normal file
View File

View File

@ -0,0 +1,20 @@
---
- name: Install CentOS 8 python bindings
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  become: true
  dnf:
    state: latest
    name:
      - python3-libselinux
      - python3-policycoreutils
      - python3-firewall

# Typo fix: "CentoOS" -> "CentOS"
- name: Install CentOS 7 python bindings
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
  become: true
  yum:
    state: latest
    name:
      - libselinux-python
      - policycoreutils-python
      - python-firewall

View File

@ -0,0 +1,8 @@
---
- name: Install systemd-networkd
become: true
yum:
state: latest
name:
- systemd-resolved
- systemd-networkd

View File

@ -0,0 +1,9 @@
---
- name: Install global packages using YUM
become: true
yum:
state: latest
name: "{{ item }}"
with_items:
- "{{ packages_global }}"
- "{{ packages_yum }}"

View File

@ -0,0 +1,31 @@
---
- name: Enable Extra Packages for Enterprise Linux
  become: true
  # `dnf_repository` is not an Ansible module; `yum_repository` manages repo
  # definitions on both yum- and dnf-based systems
  yum_repository:
    name: epel
    description: Extra Packages for Enterprise Linux
    baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/

- name: Install Extra Packages for Enterprise Linux GPG key
  become: true
  rpm_key:
    state: present
    key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7

- name: Disable yum subscription-manager
  become: true
  lineinfile:
    regex: enabled=1
    line: enabled=0
    path: /etc/yum/pluginconf.d/subscription-manager.conf
    create: true
    state: present

- name: Disable yum repo report upload
  become: true
  lineinfile:
    regex: enabled=1
    line: enabled=0
    path: /etc/yum/pluginconf.d/enabled_repos_upload.conf
    create: true
    state: present

View File

@ -0,0 +1,10 @@
---
- name: Install Fedora python bindings
when: ansible_distribution == "Fedora"
become: true
dnf:
state: latest
name:
- libselinux-python
- policycoreutils-python
- python3-firewall

View File

@ -0,0 +1,8 @@
---
- name: Install systemd-networkd
become: true
dnf:
state: latest
name:
- systemd-resolved
- systemd-networkd

View File

@ -0,0 +1,9 @@
---
- name: Install global packages using DNF
become: true
dnf:
state: latest
name: "{{ item }}"
with_items:
- "{{ packages_global }}"
- "{{ packages_dnf }}"

View File

@ -0,0 +1,55 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: absent

- name: Create the networkd config directory
  become: true
  file:
    path: /etc/systemd/network
    state: directory

- name: Make .network files
  # Guard added for hosts that define no `networking` mapping, matching the
  # templated variant of this task file; without it `with_dict` errors out
  when: networking is defined
  become: true
  template:
    src: network.j2
    dest: "/etc/systemd/network/{{ item.key }}.network"
  with_dict: "{{ networking }}"

- name: Configure systemd services
  become: true
  block:
    - name: Disable network scripts and NetworkManager
      service:
        name: "{{ item }}"
        enabled: false
      with_items:
        - network
        - NetworkManager
        - NetworkManager-wait-online
    - name: Enable systemd-networkd and systemd-resolved
      service:
        name: "{{ item }}"
        enabled: true
        state: started
      with_items:
        - systemd-networkd
        - systemd-resolved
        - systemd-networkd-wait-online
    - name: Symlink so systemd-resolved uses /etc/resolv.conf
      file:
        dest: /etc/resolv.conf
        src: /run/systemd/resolve/resolv.conf
        state: link
        force: true
        setype: net_conf_t
    - name: Symlink so /etc/resolv.conf uses systemd
      file:
        dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
        src: /usr/lib/systemd/system/systemd-resolved.service
        state: link
        force: true

View File

@ -0,0 +1,22 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
become: true
file:
path: /etc/systemd/network
state: absent
- name: Create the networkd config directory
become: true
file:
path: /etc/systemd/network
state: directory
- name: Make .network files
when: networking is defined
become: true
template:
src: network.j2
dest: "/etc/systemd/network/{{ item.key }}.network"
with_dict: "{{ networking }}"

View File

@ -1,19 +1,21 @@
--- ---
- name: Disable NetworkManager - name: Disable network scripts and NetworkManager
become: true become: true
systemd: service:
name: "{{ item }}" name: "{{ item }}"
enabled: false enabled: false
loop: with_items:
- network
- NetworkManager - NetworkManager
- NetworkManager-wait-online - NetworkManager-wait-online
- name: Enable systemd-networkd - name: Enable systemd-networkd and systemd-resolved
become: true become: true
systemd: service:
name: "{{ item }}" name: "{{ item }}"
enabled: true enabled: true
loop: state: started
with_items:
- systemd-networkd - systemd-networkd
- systemd-resolved - systemd-resolved
- systemd-networkd-wait-online - systemd-networkd-wait-online

View File

@ -0,0 +1,39 @@
---
- name: Load users variables
include_vars:
file: users.yml
- name: Reconcile user targets with host targets to get host users
set_fact:
users_local: >-
{{
users_local | default([]) + ([item] if item.targets | intersect(local_targets) else [])
}}
loop: "{{ users }}"
- name: Determine local user names
set_fact:
users_local_names: "{{ users_local_names | default([]) + [item.name] }}"
loop: "{{ users_local }}"
- name: Determine administrative users
set_fact:
users_local_admin: >-
{{
users_local_admin | default([]) + ([item] if item.admin | default(False) else [])
}}
loop: "{{ users_local }}"
- name: Determine existing users
shell: 'grep omni /etc/group | cut -d: -f4 | tr "," "\n"'
changed_when: false
register: users_local_existing
- name: Determine removed users
set_fact:
users_local_removed: >-
{{
users_local_removed | default([]) +
([item] if item not in users_local_names else [])
}}
loop: "{{ users_local_existing.stdout_lines }}"

View File

@ -0,0 +1,14 @@
---
- name: Install SSH Banner
  become: true
  template:
    src: motd.j2
    dest: /etc/issue.net
    # Quoted per Ansible convention so the leading-zero octal literal is not
    # reinterpreted by the YAML parser
    mode: "0644"

- name: Configure SSH banner
  become: true
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '#Banner none'
    line: 'Banner /etc/issue.net'

View File

@ -0,0 +1,21 @@
---
# Each pattern matches the commented-out default (`#Option yes`) as well as an
# explicit `Option yes`, so the hardened value is enforced even on stock
# sshd_config files where the option was never uncommented.
- name: Turn off password authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#?PasswordAuthentication\s+yes'
    replace: "PasswordAuthentication no"

- name: Turn off challenge response authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#?ChallengeResponseAuthentication\s+yes'
    replace: "ChallengeResponseAuthentication no"

- name: Turn off GSSAPI authentication
  become: true
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#?GSSAPIAuthentication\s+yes'
    replace: "GSSAPIAuthentication no"

1
_legacy/tasks/tasks Symbolic link
View File

@ -0,0 +1 @@
tasks

2
_legacy/vars/global.yml Normal file
View File

@ -0,0 +1,2 @@
---
ansible_user: ansible

27
_legacy/vars/network.yml Normal file
View File

@ -0,0 +1,27 @@
---
gateway: router.net.enp.one
dhcp:
  - name: PUBLIC
    # Network address (.0/24), not the gateway host address — the original
    # `10.42.100.1/24` was inconsistent with the other two pools below
    subnet: 10.42.100.0/24
    dns: 10.42.100.1
    domain: tre2.local
    lease: 21600
    start: 10.42.100.26
    stop: 10.42.100.254
  - name: DOMAIN
    subnet: 10.42.101.0/24
    dns: 10.42.101.1
    domain: net.enp.one
    lease: 21600
    start: 10.42.101.100
    stop: 10.42.101.254
  - name: SECURE
    subnet: 10.42.102.0/24
    dns: 10.42.102.1
    domain: net.enp.one
    lease: 3600
    start: 10.42.102.50
    stop: 10.42.102.254

28
_legacy/vars/packages.yml Normal file
View File

@ -0,0 +1,28 @@
---
packages_global:
- cmake
- curl
- gcc
- gcc-c++
- git
- libselinux-python
- make
- nano
- openssl-devel
- policycoreutils-python
- python-devel
- python-virtualenv
- systemd-devel
- unzip
- vim
- vim-minimal
packages_dnf:
- python3-devel
packages_yum:
- bash-completion
- bash-completion-extras
- nc
- nfs-utils
- wget

148
en1.yml
View File

@ -4,57 +4,125 @@ all:
ansible_user: ansible ansible_user: ansible
ansible_python_interpreter: /opt/ansible/bin/python ansible_python_interpreter: /opt/ansible/bin/python
omni_ansible_venv: /opt/ansible omni_ansible_venv: /opt/ansible
update: false omni_protected_users: ["root", "ansible"]
clean: false omni_domain: net.enp.one
omni_host_swarm_controller: jupiter
omni_host_webproxy: jupiter
children: children:
network:
hosts:
router:
ansible_host: en1-core.net.enp.one
ansible_network_os: edgeos
ansible_connection: network_cli
description: EN1 Core Gateway
targets: ["admin", "network"]
servers: servers:
children:
virtualization: {}
virtualization:
vars: vars:
omni_local_hosts: omni_os:
- hostname: jupiter.svr.local name: centos
ip: 192.168.42.10 version_major: "8"
- hostname: remus.svr.local
ip: 192.168.42.20
- hostname: romulus.svr.local
ip: 192.168.42.30
hosts: hosts:
jupiter: jupiter:
ansible_host: jupiter.net.enp.one ansible_host: jupiter.net.enp.one
omni_description: EN1 System Control Server omni_description: EN1 System Control Server
omni_local_targets: ["core", "network"]
omni_docker_swarm_iface: eno2 omni_docker_swarm_iface: eno2
omni_networking: omni_networking:
eno1: eno1:
dhcp: true dhcp: true
dhcp_address: 10.42.101.10/42
eno2: eno2:
dhcp: false dhcp: false
addresses: ["192.168.42.10/24"] addresses: ["192.168.42.10/24"]
children: remus:
worker: ansible_host: remus.net.enp.one
hosts: omni_description: EN1 Hypervisor/Datastore
remus: omni_local_targets: ["core", "vms"]
ansible_host: remus.net.enp.one omni_docker_swarm_iface: eno2
omni_description: EN1 Hypervisor/Datastore omni_networking:
omni_networking: eno1:
eno1: dhcp: true
dhcp: true eno2:
dhcp_address: 10.42.101.20/24 dhcp: false
eno2: addresses: ["192.168.42.20/24"]
dhcp: false romulus:
addresses: ["192.168.42.20/24"] ansible_host: romulus.net.enp.one
romulus: omni_description: EN1 Hypervisor/Datastore
ansible_host: romulus.net.enp.one omni_local_targets: ["core", "vms"]
omni_description: EN1 Hypervisor/Datastore omni_docker_swarm_iface: eno2
omni_networking: omni_networking:
eno1: eno1:
dhcp: true dhcp: true
dhcp_address: 10.42.101.30/24 eno2:
eno2: dhcp: false
dhcp: false addresses: ["192.168.42.20/24"]
addresses: ["192.168.42.30/24"] # novis:
# ansible_host: novis.tre2.local
# description: EN1 Backup Storage
# local_targets: ["core", "datastore"]
# children:
# vms:
# vars:
# disable_sudo_password: true
# required_os: centos_8
# hosts:
# gitea:
# ansible_host: vm-host-gitea.net.enp.one
# description: "Application Host: Gitea VCS"
# local_targets: ["admin", "vcs"]
# networking:
# eth0:
# dhcp: true
# plex:
# ansible_host: vm-host-plex.net.enp.one
# description: "Application Host: Plex Media Server"
# local_targets: ["admin", "plx"]
# networking:
# eth0:
# dhcp: true
# bitwarden:
# ansible_host: vm-host-bitwarden.net.enp.one
# description: "Application Host: Bitwarden Password Manager"
# local_targets: ["admin", "ssv"]
# networking:
# eth0:
# dhcp: true
# nextcloud:
# ansible_host: vm-host-nextcloud.net.enp.one
# description: "Application Host: Nextcloud Web Storage"
# local_targets: ["admin", "cfs"]
# networking:
# eth0:
# dhcp: true
# workstations:
# vars:
# enable_gui: true
# enable_ssh: false
# enable_networkd: false
# hosts:
# omega:
# ansible_host: localhost
# description: Last
# required_os: centos_7
# local_targets: ["admin", "recovery"]
# vigil-nox:
# ansible_host: localhost
# required_os: fedora_31
# description: Watchman
# local_targets: ["admin", "desktop"]
# serico-nox:
# ansible_host: localhost
# description: Silk
# required_os: fedora_31
# local_targets: ["admin", "desktop"]
# inerro:
# ansible_host: localhost
# description: Wanderer
# required_os: fedora_31
# local_targets: ["admin", "desktop"]
# network:
# hosts:
# router:
# ansible_host: router.net.enp.one
# ansible_network_os: edgeos
# ansible_connection: network_cli
# description: EN1 Core Gateway
# targets: ["admin", "network"]

View File

@ -0,0 +1,78 @@
---
- name: Configure system authentication
  hosts: all
  roles:
    - role: sshd
  tasks:
    - import_tasks: tasks/preprocess-users.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ omni_local_targets + ['omni'] }}"

        - name: Load user passwords
          include_vars:
            file: secrets/passwords.yml

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            # Only grant the groups this host actually serves, plus the shared group
            groups: "{{ item.targets | intersect(omni_local_targets) + ['omni'] }}"
            system: "{{ item.svc | default(false) }}"
            state: present
            generate_ssh_key: false
            password: "{{ omni_users_secrets[item.name] }}"
          loop: "{{ _users_local }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ _users_local_removed | default([]) | difference(omni_protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ _users_local_admin }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        # Quoted so YAML does not reinterpret the leading-zero octal literal
        mode: "0644"

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: "0644"
        # `| bool` already yields a boolean; the `== true` comparison was redundant
        state: "{{ 'present' if omni_disable_sudo_password | default(false) | bool else 'absent' }}"
      loop: "{{ _users_local_admin }}"

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ _users_local }}"

    - import_tasks: tasks/deploy-ssh-keys.yml

View File

@ -0,0 +1,69 @@
---
- name: Configure docker hosts
hosts: servers
roles:
- role: docker
tasks:
- name: Allow swarm traffic through the firewall
become: true
firewalld:
state: enabled
service: docker-swarm
zone: public
permanent: true
immediate: true
- name: Disable firewall on docker bridge interface
become: true
firewalld:
state: enabled
interface: docker0
zone: trusted
permanent: true
immediate: true
- name: Configure swarm master
hosts: jupiter
tasks:
- name: Configure portainer volume
docker_volume:
volume_name: portainer
- name: Run portainer
docker_container:
name: omni.portainer
image: portainer/portainer
restart_policy: unless-stopped
published_ports:
- 0.0.0.0:8000:8000
- 0.0.0.0:9000:9000
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- portainer:/data
- name: Initialize swarm
docker_swarm:
state: present
advertise_addr: "{{ omni_docker_swarm_iface }}"
- name: Set swarm master to DRAIN
docker_node:
hostname: "{{ ansible_host }}"
availability: drain
- name: Configure swarm nodes
hosts:
- remus
- romulus
tags: docker-nodes
tasks:
- name: Fetch docker swarm information
delegate_to: jupiter
docker_swarm_info:
register: _swarm_info
- name: Join workers to swarm
docker_swarm:
state: join
remote_addrs: ["jupiter.svr.local"]
join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
advertise_addr: "{{ omni_docker_swarm_iface }}"

View File

@ -7,83 +7,21 @@
hostname: hostname:
name: "{{ ansible_host }}" name: "{{ ansible_host }}"
- import_tasks: tasks/sshd/banner.yml - import_tasks: tasks/preprocess-users.yml
- name: Install global bash components - name: Install network bash profile
become: true become: true
copy: copy:
src: bash/{{ item }}.sh src: bashrc.sh
dest: /etc/profile.d/Z-{{ 10 + loop_index }}-enpn-{{ item }}.sh dest: /home/{{ item.name }}/.bashrc
mode: 0644 mode: 0644
loop: loop: "{{ _users_local }}"
- global
- pyenv
- aliases
- helpers
loop_control:
index_var: loop_index
label: "{{ item }}"
- name: Disable dynamic MOTD
become: true
replace:
path: /etc/pam.d/sshd
regexp: "^session\\s+optional\\s+pam_motd\\.so.*$"
replace: "#session optional pam_motd.so"
- name: Remove legacy global bashrc
become: true
file:
path: /etc/profile.d/ZA-enpn-bashrc.sh
state: absent
- name: Disable case-sensitive autocomplete - name: Disable case-sensitive autocomplete
become: true become: true
lineinfile: lineinfile:
path: /etc/inputrc path: /home/{{ item.name }}/.inputrc
line: set completion-ignore-case ((o|O)(n|ff)) line: set completion-ignore-case On
create: true create: true
mode: 0644 mode: 0644
loop: "{{ _users_local }}"
- name: Configure additional security settings on shared servers
hosts: servers
tasks:
- name: Identify local home directories
become: true
find:
file_type: directory
path: /home/
recurse: false
register: _local_home_dirs
- name: Determine files to write-protect
set_fact:
_secure_files: >-
{{ _secure_files | default([]) + [
item.path ~ '/.bashrc',
item.path ~ '/.bash_profile',
item.path ~ '/.ssh/authorized_keys',
item.path ~ '/.ssh/config'
] }}
loop: "{{ _local_home_dirs.files }}"
loop_control:
label: "{{ item.path }}"
- name: Fetch status of secure files
become: true
stat:
path: "{{ item }}"
loop: "{{ _secure_files }}"
loop_control:
label: "{{ item }}"
register: _secure_file_stats
- name: Restrict access to secure files
become: true
file:
path: "{{ item.item }}"
state: "{{ 'file' if item.stat.exists else 'touch' }}"
mode: 0400
loop: "{{ _secure_file_stats.results }}"
loop_control:
label: "Write-protecting: {{ item.item }}"

View File

@ -1,164 +0,0 @@
---
- name: Configure server management services
  hosts: servers
  tasks:
    - import_tasks: tasks/sshd/secure.yml
    - name: Enable cockpit
      # Cockpit web console is only shipped/supported on the CentOS 8 hosts;
      # the socket unit is enabled because cockpit is socket-activated
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      become: true
      systemd:
        name: cockpit.socket
        enabled: true
        state: started
- name: Configure virtualization management services
  hosts: virtualization
  tasks:
    - name: Create docker group
      # NOTE: membership in 'docker' grants root-equivalent access to the
      # docker daemon; accounts are added to it elsewhere (accounts playbook)
      become: true
      group:
        name: docker
        state: present
- name: Configure local accounts
hosts: all
vars_files:
- vars/accounts.yml
- vars/secrets/passwords.yml
- vars/sshkeys.yml
tasks:
- name: Create omni group
become: true
group:
name: "{{ omni_group.name }}"
gid: "{{ omni_group.gid }}"
state: present
- name: Determine existing omni users
changed_when: false
shell:
cmd: 'grep omni /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
register: _existing_omni_users
- name: Delete removed user accounts
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
user:
name: "{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Delete removed user groups
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
group:
name: "{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Delete removed user home directories
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
file:
path: "/home/{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Create account groups
become: true
group:
name: "{{ item.name }}"
gid: "{{ item.uid }}"
state: present
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
    - name: Create accounts
      become: true
      user:
        name: "{{ item.name }}"
        state: present
        uid: "{{ item.uid }}"
        group: "{{ item.name }}"
        # Everyone gets the omni group; admins additionally get the distro
        # sudo group, and (on virtualization hosts) the docker group.
        groups: >-
          {{
            [omni_group.name] +
            (['wheel' if ansible_os_family | lower == 'redhat' else 'sudo'] if item.admin | default(false) else []) +
            (['docker' if 'virtualization' in group_names else omni_group.name] if item.admin | default(false) else [])
          }}
        # The 'else omni_group.name' above is just some non-breaking value to cover the
        # false condition, it doesn't have special meaning
        comment: "{{ item.fullname | default('') }}"
        # Only users targeted at management hosts get a login shell
        shell: "{{ '/bin/bash' if 'mgmt' in item.targets else '/bin/false' }}"
        system: "{{ item.svc | default(false) }}"
        generate_ssh_key: false
        # Users without an entry in the secrets file get a locked password
        password: "{{ omni_users_secrets[item.name] | default(none) }}"
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"
- name: Disable sudo password for ansible
become: true
lineinfile:
create: true
path: /etc/sudoers.d/30-ansible
line: "ansible ALL=(ALL) NOPASSWD:ALL"
mode: 0644
- name: Ensure proper ownership of user home directories
become: true
file:
path: /home/{{ item.name }}
state: directory
group: "{{ item.name }}"
owner: "{{ item.name }}"
mode: 0700
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce root password
become: true
user:
name: root
password: "{{ omni_users_secrets.root }}"
state: present
- name: Create SSH directory
become: true
file:
path: /home/{{ item.name }}/.ssh
owner: "{{ item.name }}"
group: "{{ item.name }}"
state: directory
mode: 0755
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Update authorized keys
become: true
when: "'mgmt' in item.targets"
authorized_key:
user: "{{ item.name }}"
key: "{{ omni_ssh_keys[item.name] | join('\n') }}"
state: present
exclusive: true
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce ownership of authorized keys
become: true
when: "'mgmt' in item.targets"
file:
path: /home/{{ item.name }}/.ssh/authorized_keys
state: file
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: 0400
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"

View File

@ -1,34 +1,35 @@
--- ---
# - name: Configure router - name: Configure router
# hosts: router hosts: router
# gather_facts: false gather_facts: false
# pre_tasks: pre_tasks:
# - name: Collect EdgeOS facts - name: Collect EdgeOS facts
# edgeos_facts: edgeos_facts:
# gather_subset: "!config" gather_subset: "!config"
# tasks:
# - name: Configure interfaces
# edgeos_config:
# lines:
# - set interfaces ethernet eth0 address dhcp
# - set interfaces ethernet eth0 description EXTERNAL
# - set interfaces ethernet eth1 address 10.42.100.1/24
# - set interfaces ethernet eth1 address 10.42.99.1/24
# - set interfaces ethernet eth1 description LOCAL
# - set interfaces ethernet eth2 address 10.42.101.1/24
# - set interfaces ethernet eth2 description DOMAIN
- name: Configure server networking
hosts: servers
tasks: tasks:
- import_tasks: tasks/networkd/install.yml - name: Configure interfaces
- import_tasks: tasks/networkd/configure.yml edgeos_config:
- import_tasks: tasks/networkd/services.yml lines:
- set interfaces ethernet eth0 address dhcp
- set interfaces ethernet eth0 description EXTERNAL
- set interfaces ethernet eth1 address 10.42.100.1/24
- set interfaces ethernet eth1 address 10.42.99.1/24
- set interfaces ethernet eth1 description LOCAL
- set interfaces ethernet eth2 address 10.42.101.1/24
- set interfaces ethernet eth2 description DOMAIN
- name: Configure servers
hosts: servers
roles:
- role: networkd
tasks:
- name: Configure local hostsfile - name: Configure local hostsfile
become: true become: true
lineinfile: lineinfile:
path: /etc/hosts path: /etc/hosts
state: present state: present
line: "{{ item.ip }} {{ item.hostname }}" line: "{{ item }}"
loop: "{{ omni_local_hosts | default([]) }}" loop:
- "192.168.42.10 jupiter.svr.local"
- "192.168.42.20 remus.svr.local"
- "192.168.42.30 romulus.svr.local"

View File

@ -1,37 +0,0 @@
---
# TBW
# - name: Install Nginx
# hosts: jupiter
# handlers:
# - name: restart_nginx
# become: true
# systemd:
# name: nginx
# state: restarted
# tasks:
# - name: Install nginx and certbot
# become: true
# dnf:
# name:
# - nginx
# - certbot
# - python3-certbot-nginx
# state: present
#
# - name: Enable and start nginx
# become: true
# systemd:
# name: nginx
# state: started
# enabled: true
#
# - name: Install configuration
# become: true
# copy:
# src: nginx.conf
# dest: /etc/nginx/nginx.conf
# notify:
# - restart_nginx
#
# # sudo setsebool -P httpd_can_network_connect on

32
playbooks/configure.yml Normal file
View File

@ -0,0 +1,32 @@
---
- import_playbook: meta.yml

- name: Configure system settings
  hosts: all
  pre_tasks:
    - import_tasks: tasks/centos-8-kernelplus.yml
      tags: kernel
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  roles:
    - role: packages
      vars:
        omni_pkg_clean: true
    - role: sshd
      vars:
        omni_restart_services: true
  tasks:
    - name: Enable cockpit
      become: true
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      systemd:
        # Enable the socket unit: cockpit is socket-activated, and this
        # matches the management-services play elsewhere in the repo which
        # enables 'cockpit.socket' (plain 'cockpit' misses socket activation)
        name: cockpit.socket
        enabled: true
        state: started

- import_playbook: configure-auth.yml
- import_playbook: configure-env.yml
- import_playbook: configure-network.yml
- import_playbook: configure-docker.yml

67
playbooks/deploy-docs.yml Normal file
View File

@ -0,0 +1,67 @@
---
- hosts: nimbus-1.net.enp.one
name: Deploy documentation
vars:
# Local directory to use for cloning and building the documentation site
DIR_BUILD: /tmp/docs
# Remote directory to install the site at
DIR_DEPLOY: /usr/share/nginx/doc.enp.one/html
tasks:
- name: Build the static site locally
delegate_to: 127.0.0.1
block:
- name: Ensure the build directory does not exist
file:
path: "{{ DIR_BUILD }}"
state: absent
- name: Clone documentation repository
git:
repo: git@vcs.enp.one:omni/omni-docs.git
dest: "{{ DIR_BUILD }}/"
- name: Generate build env requirements file
# Generate the requirements.txt style format, pipe through grep to remove
# the index line (not sure why thats included at all tbh) and save the
# result in "requirements.txt" to usage with pip
shell: pipenv lock --requirements | grep --invert-match "\-i">requirements.txt
args:
chdir: "{{ DIR_BUILD }}/"
- name: Create build env and install requirements
pip:
requirements: "{{ DIR_BUILD }}/requirements.txt"
virtualenv: "{{ DIR_BUILD }}/venv"
virtualenv_python: python3
state: present
- name: Build the static site using mkdocs
shell: "{{ DIR_BUILD }}/venv/bin/mkdocs build"
args:
chdir: "{{ DIR_BUILD }}"
- name: Upload static site to remote
copy:
src: "{{ DIR_BUILD }}/site/"
dest: "/tmp/docs/"
- name: Remove legacy site
become: true
file:
path: "{{ DIR_DEPLOY }}"
state: absent
- name: Copy static site to deployment directory
become: true
copy:
src: "/tmp/docs/"
dest: "{{ DIR_DEPLOY }}"
remote_src: true
owner: root
group: nginx
mode: 0755
setype: httpd_sys_content_t
- name: Clean up local build directory
delegate_to: 127.0.0.1
file:
path: "{{ DIR_BUILD }}"
state: absent
- name: Clean up remote temp directory
file:
path: /tmp/docs
state: absent

View File

@ -0,0 +1,38 @@
---
- hosts: nimbus-1.net.enp.one
name: Deploy main landing page at enpaul.net
vars:
# Local directory to use for cloning and building the documentation site
DIR_BUILD: /tmp/docs
# Remote directory to install the site at
DIR_DEPLOY: /usr/share/nginx/enpaul.net/html
tasks:
- name: Upload static site to remote
copy:
src: "{{ DIR_BUILD }}/site/"
dest: "/tmp/docs/"
- name: Remove legacy site
become: true
file:
path: "{{ DIR_DEPLOY }}"
state: absent
- name: Copy static site to deployment directory
become: true
copy:
src: "/tmp/docs/"
dest: "{{ DIR_DEPLOY }}"
remote_src: true
owner: root
group: nginx
mode: 0755
setype: httpd_sys_content_t
- name: Clean up local build directory
delegate_to: 127.0.0.1
file:
path: "{{ DIR_BUILD }}"
state: absent
- name: Clean up remote temp directory
file:
path: /tmp/docs
state: absent

View File

@ -0,0 +1,78 @@
---
- name: Configure local users
hosts: all:!network
tags:
- auth
- ssh
- users
tasks:
- import_tasks: tasks/preprocess-local-users.yml
- name: Create local user accounts
tags: users_create
become: true
block:
- name: Create groups
group:
name: "{{ item }}"
state: present
loop: "{{ local_targets + ['omni'] }}"
- name: Load user passwords
include_vars:
file: secrets/passwords.yml
- name: Create users
user:
name: "{{ item.name }}"
comment: "{{ item.fullname | default('') }}"
shell: /bin/bash
groups: "{{ item.targets | intersect(local_targets) + ['omni'] }}"
system: "{{ item.svc | default(False) }}"
state: present
generate_ssh_key: false
password: "{{ users_secrets[item.name] }}"
loop: "{{ users_local }}"
- name: Delete removed user accounts
become: true
user:
name: "{{ item }}"
state: absent
loop: "{{ users_local_removed | default([]) | difference(protected_users) }}"
- name: Grant sudo permissions to admin user accounts
become: true
user:
name: "{{ item.name }}"
groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
state: present
loop: "{{ users_local_admin }}"
- name: Disable sudo password for ansible
become: true
lineinfile:
create: true
path: /etc/sudoers.d/30-ansible
line: "ansible ALL=(ALL) NOPASSWD:ALL"
mode: 0644
- name: Disable sudo password for admin users
become: true
lineinfile:
create: true
path: /etc/sudoers.d/40-admin
line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
mode: 0644
state: "{{ 'present' if disable_sudo_password | bool == true else 'absent' }}"
loop: "{{ users_local_admin }}"
- name: Ensure proper ownership of user home directories
become: true
file:
group: "{{ item.name }}"
owner: "{{ item.name }}"
path: /home/{{ item.name }}
recurse: true
state: directory
loop: "{{ users_local }}"

View File

@ -0,0 +1,33 @@
---
# Play name fixed: this deploys the minecraft server, not plex (the original
# name was copy-pasted from deploy-plex.yml)
- name: Deploy minecraft server container
  hosts: remus
  tasks:
    - name: Create world volume
      # recreate: never protects the world data from accidental recreation
      docker_volume:
        name: minecraft
        driver: local
        state: present
        recreate: never

    - name: Launch minecraft server container
      docker_container:
        name: mcs
        state: started
        image: itzg/minecraft-server
        # Pass -e omni_update_minecraft=true to force a container refresh
        recreate: "{{ omni_update_minecraft | default(false) | bool }}"
        volumes:
          - minecraft:/data
        published_ports:
          - "25565:25565/tcp"
        env:
          # All env values quoted: docker_container requires string values
          EULA: "TRUE"
          VERSION: "1.15.2"
          MAX_MEMORY: "8G"
          MOTD: "A home for buttery companions"
          MODE: survival
          OPS: ScifiGeek42
          WHITELIST: "ScifiGeek42,fantasycat256,CoffeePug,Snowdude21325,KaiserSJR,glutenfreebean"
          MAX_BUILD_HEIGHT: "512"
          SNOOPER_ENABLED: "false"
          ICON: https://cdn.enp.one/img/logos/e-w-sm.png
          ENABLE_RCON: "false"

44
playbooks/deploy-plex.yml Normal file
View File

@ -0,0 +1,44 @@
---
- name: Deploy plex container
hosts: remus
tasks:
- name: Create plex metadata volume
docker_volume:
name: plexmeta
driver: local
state: present
recreate: never
- name: Create plex NFS media volume
docker_volume:
name: plexdata
driver: local
state: present
recreate: never
driver_options:
type: nfs
o: "addr=plexistore.tre2.local,ro"
device: ":/nfs/plex"
- name: Allow plex access through the firewall
become: true
firewalld:
state: enabled
service: plex
permanent: true
immediate: true
- name: Launch plex container
docker_container:
name: pms
state: started
image: plexinc/pms-docker:latest
pull: true
recreate: "{{ omni_update_plex | default(false) | bool }}"
network_mode: host
volumes:
- plexmeta:/config
- plexdata:/data:ro
env:
TZ: America/New_York
ALLOWED_NETWORKS: 10.42.100.0/24,10.42.101.0/24

View File

@ -0,0 +1,22 @@
---
- name: Update ssh keys on all devices
hosts: all
tasks:
- import_tasks: tasks/preprocess-local-users.yml
- name: Ensure SSH directory exists
become: true
file:
state: directory
path: /home/{{ item.name }}/.ssh
loop: "{{ users_local }}"
- name: Put keys on remote
become: true
when: item.keys != []
authorized_key:
user: "{{ item.name }}"
key: "{{ item.sshkeys | join('\n') }}"
state: present
exclusive: yes
loop: "{{ users_local }}"

View File

@ -1 +0,0 @@
../resources

63
playbooks/files/bashrc.sh Normal file
View File

@ -0,0 +1,63 @@
# Global network bashrc/profile file
# Updated 2020-03-18

function parse_git_branch() {
  # Emit ' (<branch>)' when inside a git work tree, nothing otherwise
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

# The command substitution must be escaped (\$) so the branch is re-evaluated
# every time the prompt is drawn; unescaped it runs exactly once at source
# time and the prompt then shows a stale (or empty) branch forever. The
# sibling PS1 fragment in this repo escapes it the same way.
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
function venv() {
  # Minimal python virtualenv manager; envs live under $HOME/.venvs.
  # $HOME is used instead of /home/$USERNAME: USERNAME is not guaranteed to
  # be set in non-login shells, and /home/ is wrong for root.
  local DIR="$HOME/.venvs"
  if [ $# -eq 0 ]; then
    echo "No command specified"
  elif [ "$1" = "--help" ] || [ "$1" = '-h' ]; then
    echo "Custom python Virtualenv manager
\"Because pipenv is too hard and everything else sucks\"

Commands:
  list                 List available virtualenvs
  show                 Alias of list
  delete <venv>        Delete a virtualenv
  del                  Alias of delete
  rm                   Alias of delete
  load <venv>          Activate a virtualenv for usage
  new <venv> <python>  Create a new virtualenv. If <python> is not specified,
                       then the system default python is used
"
  elif [ "$1" = "list" ] || [ "$1" = "show" ] || [ "$1" = "ls" ]; then
    ls "$DIR"
  elif [ "$1" = "load" ]; then
    . "$DIR/$2/bin/activate"
  elif [ "$1" = "new" ]; then
    virtualenv "$DIR/$2" --python="$3"
  elif [ "$1" = "delete" ] || [ "$1" = "del" ] || [ "$1" = "rm" ]; then
    # ${DIR:?} aborts instead of expanding to 'rm -rf /<name>' if DIR is
    # somehow empty; positionals quoted so names with spaces don't glob
    rm -rf "${DIR:?}/$2"
  elif [ "$1" = "go" ]; then
    cd "$DIR/$2"
  fi
}
# Jump up N directories: 'up 3' == 'cd ../../..'
function up() { cd $(eval printf '../'%.0s {1..$1}); }
# Search the installed package list: 'pipin requests'
function pipin() { pip freeze | grep $1; }

alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias mklink='ln -s'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gg='cd ~/Git'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"

# $HOME works for root and for non-/home accounts; $USERNAME is not
# guaranteed to be set outside login shells
export rc=$HOME/.bashrc

View File

@ -0,0 +1,12 @@
alias powerline='/opt/powerline/bin/powerline'
alias powerline-config='/opt/powerline/bin/powerline-config'
alias powerline-daemon='/opt/powerline/bin/powerline-daemon'
alias powerline-lint='/opt/powerline/bin/powerline-lint'
alias powerline-render='/opt/powerline/bin/powerline-render'
if [ -z ${DISABLE_POWERLINE} ]; then
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
source /opt/powerline/powerline.sh
fi

View File

@ -0,0 +1,153 @@
_powerline_columns_fallback() {
if which stty &>/dev/null ; then
local cols="$(stty size 2>/dev/null)"
if ! test -z "$cols" ; then
echo "${cols#* }"
return 0
fi
fi
echo 0
return 0
}
_powerline_tmux_pane() {
echo "${TMUX_PANE:-`TMUX="$_POWERLINE_TMUX" tmux display -p "#D"`}" | \
tr -d ' %'
}
_powerline_tmux_setenv() {
TMUX="$_POWERLINE_TMUX" tmux setenv -g TMUX_"$1"_`_powerline_tmux_pane` "$2"
TMUX="$_POWERLINE_TMUX" tmux refresh -S
}
_powerline_tmux_set_pwd() {
if test "$_POWERLINE_SAVED_PWD" != "$PWD" ; then
_POWERLINE_SAVED_PWD="$PWD"
_powerline_tmux_setenv PWD "$PWD"
fi
}
_powerline_return() {
return $1
}
_POWERLINE_HAS_PIPESTATUS="$(
_powerline_return 0 | _powerline_return 43
test "${PIPESTATUS[*]}" = "0 43"
echo "$?"
)"
_powerline_has_pipestatus() {
return $_POWERLINE_HAS_PIPESTATUS
}
_powerline_status_wrapper() {
local last_exit_code=$? last_pipe_status=( "${PIPESTATUS[@]}" )
if ! _powerline_has_pipestatus \
|| test "${#last_pipe_status[@]}" -eq "0" \
|| test "$last_exit_code" != "${last_pipe_status[$(( ${#last_pipe_status[@]} - 1 ))]}" ; then
last_pipe_status=()
fi
"$@" $last_exit_code "${last_pipe_status[*]}"
return $last_exit_code
}
_powerline_add_status_wrapped_command() {
local action="$1" ; shift
local cmd="$1" ; shift
full_cmd="_powerline_status_wrapper $cmd"
if test "$action" = "append" ; then
PROMPT_COMMAND="$PROMPT_COMMAND"$'\n'"$full_cmd"
else
PROMPT_COMMAND="$full_cmd"$'\n'"$PROMPT_COMMAND"
fi
}
_powerline_tmux_set_columns() {
_powerline_tmux_setenv COLUMNS "${COLUMNS:-`_powerline_columns_fallback`}"
}
_powerline_init_tmux_support() {
if test -n "$TMUX" && tmux refresh -S &>/dev/null ; then
# TMUX variable may be unset to create new tmux session inside this one
_POWERLINE_TMUX="$TMUX"
trap '_powerline_tmux_set_columns' WINCH
_powerline_tmux_set_columns
test "$PROMPT_COMMAND" != "${PROMPT_COMMAND/_powerline_tmux_set_pwd}" \
|| _powerline_add_status_wrapped_command append _powerline_tmux_set_pwd
fi
}
_powerline_local_prompt() {
# Arguments:
# 1: side
# 2: renderer_module arg
# 3: last_exit_code
# 4: last_pipe_status
# 5: jobnum
# 6: local theme
"$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
$2 \
--last-exit-code=$3 \
--last-pipe-status="$4" \
--jobnum=$5 \
--renderer-arg="client_id=$$" \
--renderer-arg="local_theme=$6"
}
_powerline_prompt() {
# Arguments:
# 1: side
# 2: last_exit_code
# 3: last_pipe_status
# 4: jobnum
"$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
--width="${COLUMNS:-$(_powerline_columns_fallback)}" \
-r.bash \
--last-exit-code=$2 \
--last-pipe-status="$3" \
--jobnum=$4 \
--renderer-arg="client_id=$$"
}
_powerline_set_prompt() {
local last_exit_code=$1 ; shift
local last_pipe_status=$1 ; shift
local jobnum="$(jobs -p|wc -l)"
PS1="$(_powerline_prompt aboveleft $last_exit_code "$last_pipe_status" $jobnum)"
if test -n "$POWERLINE_SHELL_CONTINUATION$POWERLINE_BASH_CONTINUATION" ; then
PS2="$(_powerline_local_prompt left -r.bash $last_exit_code "$last_pipe_status" $jobnum continuation)"
fi
if test -n "$POWERLINE_SHELL_SELECT$POWERLINE_BASH_SELECT" ; then
PS3="$(_powerline_local_prompt left '' $last_exit_code "$last_pipe_status" $jobnum select)"
fi
}
_powerline_setup_prompt() {
VIRTUAL_ENV_DISABLE_PROMPT=1
if test -z "${POWERLINE_COMMAND}" ; then
POWERLINE_COMMAND="$("$POWERLINE_CONFIG_COMMAND" shell command)"
fi
test "$PROMPT_COMMAND" != "${PROMPT_COMMAND%_powerline_set_prompt*}" \
|| _powerline_add_status_wrapped_command prepend _powerline_set_prompt
PS2="$(_powerline_local_prompt left -r.bash 0 0 0 continuation)"
PS3="$(_powerline_local_prompt left '' 0 0 0 select)"
}
if test -z "${POWERLINE_CONFIG_COMMAND}" ; then
if which powerline-config >/dev/null ; then
POWERLINE_CONFIG_COMMAND=powerline-config
else
POWERLINE_CONFIG_COMMAND="$(dirname "$BASH_SOURCE")/../../../scripts/powerline-config"
fi
fi
if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses prompt ; then
_powerline_setup_prompt
fi
if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses tmux ; then
_powerline_init_tmux_support
fi

View File

@ -0,0 +1,53 @@
{
"common": {
"term_truecolor": false
},
"ext": {
"ipython": {
"colorscheme": "default",
"theme": "in",
"local_themes": {
"rewrite": "rewrite",
"out": "out",
"in2": "in2"
}
},
"pdb": {
"colorscheme": "default",
"theme": "default"
},
"shell": {
"colorscheme": "default",
"theme": "default_leftonly",
"local_themes": {
"continuation": "continuation",
"select": "select"
}
},
"tmux": {
"colorscheme": "default",
"theme": "default"
},
"vim": {
"colorscheme": "default",
"theme": "default",
"local_themes": {
"__tabline__": "tabline",
"cmdwin": "cmdwin",
"help": "help",
"quickfix": "quickfix",
"powerline.matchers.vim.plugin.nerdtree.nerdtree": "plugin_nerdtree",
"powerline.matchers.vim.plugin.commandt.commandt": "plugin_commandt",
"powerline.matchers.vim.plugin.gundo.gundo": "plugin_gundo",
"powerline.matchers.vim.plugin.gundo.gundo_preview": "plugin_gundo-preview"
}
},
"wm": {
"colorscheme": "default",
"theme": "default",
"update_interval": 2
}
}
}

View File

@ -1,128 +0,0 @@
---
- name: Bootstrap remote ansible environment
hosts: all
tags:
- always
vars:
# Set this fact to allow the bootstrap play to run using the native system python
# interpreter. A variable defined here is only in scope while this specific play
# is being run; once this play is done this value is dropped and the default value
# (which is actually set in the inventory file to the interpreter created by this
# play) will be used.
ansible_python_interpreter: /usr/bin/python3
tasks:
- name: Determine runtime settings
set_fact:
_runtime_clean: "{{ true if (clean | bool) else false }}"
_runtime_update: "{{ true if (update | bool) else false }}"
_runtime_update_state: "{{ 'latest' if (update | bool) else 'present' }}"
- name: Clean bootstrap virtualenv
when: _runtime_clean
become: true
file:
path: "{{ omni_ansible_venv }}"
state: absent
- name: Create bootstrap virtualenv directory
become: true
file:
path: "{{ omni_ansible_venv }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0755
- name: Create bootstrap virtualenv
command:
cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
creates: "{{ omni_ansible_venv }}/bin/python"
# - name: Assign ownership of the virtualenv to ansible
# become: true
# file:
# path: "{{ omni_ansible_venv }}"
# state: directory
# owner: "{{ ansible_user }}"
# group: "{{ ansible_user }}"
# mode: 0755
# follow: false
- name: Generate remote requirements file locally
delegate_to: 127.0.0.1
command:
cmd: poetry export --format requirements.txt
changed_when: false
register: _poetry_requirements
- name: Copy remote requirements file
blockinfile:
path: "{{ omni_ansible_venv }}/req.txt"
create: true
block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
mode: 0644
- name: Install remote requirements
pip:
executable: "{{ omni_ansible_venv }}/bin/pip"
requirements: "{{ omni_ansible_venv }}/req.txt"
state: present
- name: Install CentOS 8 python bindings
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name:
- python3-libselinux
- python3-policycoreutils
- python3-firewall
- name: Install CentOS 7 python bindings
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
become: true
yum:
state: "{{ _runtime_update_state }}"
name:
- libselinux-python
- policycoreutils-python
- python-firewall
- name: Install Fedora python bindings
when: ansible_distribution == "Fedora"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name:
- libselinux-python
- policycoreutils-python
- python3-firewall
- name: Check meta environment
hosts: all
tags:
- always
tasks:
- name: Check required operating system
when: omni_os is defined
assert:
that:
- omni_os.name == ansible_distribution | lower
- omni_os.version_major == ansible_distribution_major_version
fail_msg: >-
Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}',
expected '{{ omni_os.name }} {{ omni_os.version_major }}'
success_msg: >-
Remote is running expected OS '{{ ansible_distribution }}
{{ ansible_distribution_major_version }}'
- name: Check required interpreter settings
assert:
that:
- ansible_python_interpreter.startswith(omni_ansible_venv) is true
fail_msg: >-
Interpreter '{{ ansible_python_interpreter }}'
is not in the expected venv '{{ omni_ansible_venv }}'
success_msg: Interpreter '{{ ansible_python_interpreter }}' is in the expected venv"

72
playbooks/meta.yml Normal file
View File

@ -0,0 +1,72 @@
---
- name: Bootstrap remote ansible environment
hosts: all
gather_facts: false
become: true
tags:
- always
- meta
vars:
ansible_python_interpreter: /usr/bin/python3
tasks:
- name: Clean bootstrap virtualenv
when: omni_force_reinstall is defined
file:
path: "{{ omni_ansible_venv }}"
state: absent
- name: Create bootstrap virtualenv
command:
cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
creates: "{{ omni_ansible_venv }}/bin/python"
- name: Generate remote requirements file locally
become: false
delegate_to: 127.0.0.1
command:
cmd: poetry export --format requirements.txt
changed_when: false
register: _poetry_requirements
- name: Copy remote requirements file
blockinfile:
path: "{{ omni_ansible_venv }}/req.txt"
create: true
block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
- name: Install remote requirements
pip:
executable: "{{ omni_ansible_venv }}/bin/pip"
requirements: "{{ omni_ansible_venv }}/req.txt"
state: present
- name: Assign ownership of the virtualenv to ansible
file:
path: "{{ omni_ansible_venv }}"
state: directory
owner: ansible
group: ansible
recurse: true
follow: false
- name: Check meta environment
hosts: all
tags:
- always
- meta
tasks:
- name: Check required operating system
when: omni_os is defined
assert:
that:
- omni_os.name == ansible_distribution | lower
- omni_os.version_major == ansible_distribution_major_version
fail_msg: "Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}', expected '{{ omni_os.name }} {{ omni_os.version_major }}'"
success_msg: "Remote is running expected OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}'"
- name: Check required interpreter settings
assert:
that:
- ansible_python_interpreter.startswith(omni_ansible_venv) is true
fail_msg: "Interpreter '{{ ansible_python_interpreter }}' is not in the expected venv '{{ omni_ansible_venv }}'"
success_msg: "Interpreter '{{ ansible_python_interpreter }}' is in the expected venv"

View File

@ -1,29 +0,0 @@
---
- import_playbook: initialize.yml
- name: Configure system settings
hosts: all
vars_files:
- vars/packages.yml
pre_tasks:
- import_tasks: tasks/centos-8-kernelplus.yml
tasks:
- import_tasks: tasks/packages/clean.yml
when: _runtime_clean is true
- import_tasks: tasks/packages/repos.yml
- import_tasks: tasks/packages/update.yml
when: _runtime_update is true
- import_tasks: tasks/packages/install.yml
- import_playbook: configure-network.yml
- import_playbook: configure-mgmt.yml
- import_playbook: configure-env.yml

View File

@ -1,2 +0,0 @@
---
# TBW

View File

@ -1,61 +0,0 @@
---
# TBW
# - import_playbook: provision-common.yml
#
#
# - name: Install and start Docker
# hosts: virtualization
# tasks:
# - import_tasks: tasks/docker/install.yml
#
# - name: Start and enable docker service
# become: true
# systemd:
# name: docker
# state: started
# enabled: yes
#
# - name: Allow swarm traffic through the firewall
# become: true
# firewalld:
# zone: trusted
# interface: "{{ item.key }}"
# permanent: true
# state: enabled
#
#
# - name: Configure swarm master
# hosts: "{{ omni_docker_swarm_manager }}"
# tasks:
# - name: Initialize swarm
# docker_swarm:
# state: present
# advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# - name: Set swarm master to DRAIN
# docker_node:
# hostname: "{{ ansible_host }}"
# availability: drain
#
# - name: Configure swarm nodes
# hosts:
# - remus
# - romulus
# tags: docker-nodes
# tasks:
# - name: Fetch docker swarm information
# delegate_to: jupiter
# docker_swarm_info:
# register: _swarm_info
#
# - name: Join workers to swarm
# docker_swarm:
# state: join
# remote_addrs: ["jupiter.svr.local"]
# join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
# advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# # docker plugin install --alias glusterfs trajano/glusterfs-volume-plugin:v2.0.3 --grant-all-permissions --disable
# # docker plugin set glusterfs SERVERS=jupiter.svr.local,remus.svr.local,romulus.svr.local
# # docker plugin enable glusterfs

View File

@ -1,16 +0,0 @@
---
# Top-level orchestration: runs the provisioning playbooks in dependency order.
# First: meta setup. Check everything is as we expect and that we have a remote
# venv with required dependencies
- import_playbook: initialize.yml
# Second: initial setup. Enforces the system to a "known good" state that we can
# work with
- import_playbook: provision-common.yml
# Third: setup the datastore. Lots of downstream stuff won't work without the ability
# to mount data storage
- import_playbook: provision-datastore.yml
# Finally: setup the docker swarm. Configures the workers, security, web proxy, and
# management system. Once done, applications are ready for deployment
# NOTE(review): 'provison-swarm.yml' looks like a typo of 'provision-swarm.yml' --
# confirm which filename actually exists before renaming either side
- import_playbook: provison-swarm.yml

View File

@ -1 +0,0 @@
../resources

57
playbooks/update-plex.yml Normal file
View File

@ -0,0 +1,57 @@
---
# - hosts: vm-host-plex.net.enp.one
# #gather_facts: false
# tasks:
# - name: Query plex API (shhh) to load latest releases
# get_url:
# url: https://plex.tv/api/downloads/5.json
# dest: "{{ plex_releases_file | default('/tmp/plexreleases.json') }}"
- hosts: plex
name: Update Plex Media Server to latest version
vars:
plex_releases: "{{ lookup('url', 'https://plex.tv/api/downloads/5.json') | from_json }}"
tasks:
- name: Identifiy the proper release file
when: (ansible_os_family | lower == item["distro"]) and (ansible_distribution | lower in item["label"] | lower) and (ansible_userspace_bits in item["label"])
set_fact:
plex_release_url: "{{ item.url }}"
plex_release_checksum: "{{ item.checksum }}"
loop: "{{ plex_releases['computer']['Linux']['releases'] }}"
- name: Download package
get_url:
url: "{{ plex_release_url }}"
checksum: sha1:{{ plex_release_checksum }}
dest: /tmp/plexmediaserver-{{ plex_release_checksum }}.{{ plex_release_url.split(".")[-1] }}
- name: Stop the PMS service
become: true
systemd:
name: "{{ plex_service | default('plexmediaserver') }}"
state: stopped
- name: Install update package
become: true
block:
- name: Install update package using DNF
when: ansible_distribution == "Fedora"
dnf:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
state: latest
- name: Install update package using YUM
when: ansible_distribution == "CentOS"
yum:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
state: latest
- name: Install update package using APT
when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
apt:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.deb
state: latest
- name: Start the PMS service
become: true
systemd:
name: "{{ plex_service | default('plexmediaserver') }}"
state: started

1432
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -2,22 +2,22 @@
name = "omni-ansible" name = "omni-ansible"
version = "0.0.0" version = "0.0.0"
description = "Network deployment procedures and configuration state" description = "Network deployment procedures and configuration state"
authors = ["Ethan Paul <me@enp.one>"] authors = ["Ethan Paul <e@enp.one>"]
license = "MIT" license = "MIT"
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^3.7" python = "^3.7"
ansible = "^2.9.4" ansible = "^2.9.4"
paramiko = "^2.7.1"
jinja2 = "^2.11.1"
docker = "^4.2.0" docker = "^4.2.0"
docker-compose = "^1.25.4" docker-compose = "^1.25.4"
paramiko = "^2.7.1"
[tool.poetry.dev-dependencies] [tool.poetry.dev-dependencies]
ansible-lint = "^4.2.0" ansible-lint = "^4.2.0"
ansible-toolbox = "^0.3"
pre-commit = "^2.9.2"
pre-commit-hooks = "^3.3.0"
safety = "^1.9.0"
tox = "^3.20.1"
tox-poetry-installer = "^0.5.2"
yamllint = "^1.20.0" yamllint = "^1.20.0"
ansible-toolbox = "^0.3"
[build-system]
requires = ["poetry>=1.0.0"]
build-backend = "poetry.masonry.api"

View File

@ -1,4 +0,0 @@
# Navigation shortcuts for common user directories
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias gg='cd ~/Git'
# GNOME file manager under the name familiar from Windows
alias explorer='nautilus'

View File

@ -1,12 +0,0 @@
# General-purpose shell aliases
alias bk='cd -'
# Re-run the previous command under sudo
alias fuck='sudo $(history -p \!\!)'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
# NOTE: this file previously defined 'ls' twice; the earlier, shadowed
# definition has been removed — this one (absolute path) always won anyway
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"

View File

@ -1,7 +0,0 @@
# Print the current git branch as " (branch)", or nothing outside a repo;
# used to decorate the prompt string below
function _parse_git_branch() {
# Drop all non-current-branch lines, then reformat "* name" -> " (name)"
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
# Prompt: user@host (branch) cwd $ — wrapped in color escape sequences
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
# NOTE(review): $USERNAME may be unset in some non-login shells — confirm
# whether $USER or $HOME would be more reliable here
export rc=/home/$USERNAME/.bashrc
export VIRTUALENV_DIR=/home/$USERNAME/.venvs

View File

@ -1,18 +0,0 @@
# Print a random alphanumeric string; length is $1, defaulting to 32
random() {
    if [[ $# -eq 0 ]]; then
        num=32
    else
        num=$1
    fi
    # Read /dev/urandom directly instead of piping it through 'cat' (UUOC)
    tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "$num" | head -n 1
}

# Go up N directories: 'up 3' is equivalent to 'cd ../../..'
function up() { cd $(eval printf '../'%.0s {1..$1}); }

# Show installed pip packages matching $1; quoted to prevent word splitting
function pipin() { pip freeze | grep "$1"; }

# Prompt for a password (no echo) and print its SHA-512 crypt(3) hash
function passhash() {
    read -sp 'Password: ' tmppass;
    echo $tmppass | python3 -c 'import crypt; print(crypt.crypt(input(), crypt.mksalt(crypt.METHOD_SHA512)));';
    unset tmppass;
}

View File

@ -1,76 +0,0 @@
#!/usr/bin/env bash

# Manage Python virtualenvs stored under $VIRTUALENV_DIR
function pyenv () {
    usage="Custom Python virtualenv manager

pyenv [list, delete, load, new] [VENV]

Commands:
    list                List existing virtualenvs (alias: 'ls')
    load VENV           Activate the virtualenv named VENV (alias: 'source')
    new VENV [VERSION]  Create and load a new virtualenv named VENV. Optionally VERSION
                        can be a python version to use for creating the venv. Note that
                        only python3 versions are supported.
    delete VENV         Delete the virtualenv named VENV (alias: 'rm')";

    if [ $# -eq 0 ]; then
        echo "Error: no command specified" >&2;
        echo "$usage";
        return 1;
    fi;

    case $1 in
        "-h"| "--help")
            echo "$usage";
            return 0;;
        "ls"| "list")
            lsvenv "$VIRTUALENV_DIR";;
        "rm"| "delete")
            if [ $# -ne 2 ]; then
                echo "Error: no virtualenv specified" >&2;
                return 1;
            fi;
            # ':?' aborts if VIRTUALENV_DIR is unset, preventing 'rm -rf /<venv>'
            rm --recursive --force "${VIRTUALENV_DIR:?}/$2";;
        "source" | "load")
            if [ $# -ne 2 ]; then
                echo "Error: no virtualenv specified" >&2;
                return 1;
            fi;
            # shellcheck source=/dev/null
            source "$VIRTUALENV_DIR/$2/bin/activate";;
        "new")
            if [ $# -lt 2 ]; then
                echo "Error: no virtualenv specified" >&2;
                return 1;
            fi;
            if [ $# -eq 3 ]; then
                version="$3";
            else
                version="3";
            fi
            if ! command -v "python$version" &>/dev/null; then
                echo "Error: no interpreter found for python version '$version'" >&2;
                return 2;
            fi
            if "python$version" -m venv "$VIRTUALENV_DIR/$2"; then
                echo "New virtualenv '$2' created using $(command -v "python$version")" >&2;
                # shellcheck source=/dev/null
                source "$VIRTUALENV_DIR/$2/bin/activate"
            else
                # In the else branch $? still holds the venv command's status
                return $?;
            fi;;
        *)
            echo "Error: unknown command '$1'" >&2;
            echo "$usage";
            return 1;;
    esac
}
# Print the names of virtualenvs under directory $1 (dirs with bin/activate)
function lsvenv () {
    venvs=()
    # Iterate the glob directly: the original 'for item in /usr/bin/ls -d "$1"/*/'
    # also looped over the literal words '/usr/bin/ls' and '-d' — a bug
    for item in "$1"/*/; do
        if stat "${item}/bin/activate" &>/dev/null; then
            venvs+=("$(basename "$item")");
        fi
    done
    echo "${venvs[*]}"
}

View File

@ -15,8 +15,8 @@
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
yum: yum:
# Update the cache to update with the new docker repo # Update the cache to update with the new docker repo
update_cache: true update_cache: yes
state: "{{ _runtime_update_state }}" state: latest
name: name:
- device-mapper-persistent-data # Required for docker devicestorage driver - device-mapper-persistent-data # Required for docker devicestorage driver
- lvm2 # same - lvm2 # same
@ -28,8 +28,8 @@
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8" when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
dnf: dnf:
# Update the cache to update with the new docker repo # Update the cache to update with the new docker repo
update_cache: true update_cache: yes
state: "{{ _runtime_update_state }}" state: latest
name: name:
- device-mapper-persistent-data # Required for docker devicestorage driver - device-mapper-persistent-data # Required for docker devicestorage driver
- lvm2 # same - lvm2 # same

View File

@ -0,0 +1,19 @@
---
- import_tasks: install.yml

- name: Start and enable docker service
  become: true
  systemd:
    name: docker
    state: started
    enabled: true

# NOTE(review): the 'tasks/' prefix is unusual for an import inside a role's
# tasks directory — confirm this relative path resolves as intended
- import_tasks: tasks/preprocess-users.yml

- name: Add superusers to the docker group
  become: true
  user:
    name: "{{ item.name }}"
    groups: docker
    append: true
  loop: "{{ _users_local_admin }}"

View File

@ -0,0 +1,2 @@
---
# When true, tasks in this role are allowed to stop/restart running services;
# when false (the default) services are only enabled/disabled without
# interrupting their running state
omni_restart_services: false

View File

@ -1,4 +1,6 @@
--- ---
- import_tasks: packages.yml
- name: Configure networking via systemd - name: Configure networking via systemd
become: true become: true
when: omni_networking is defined when: omni_networking is defined
@ -11,14 +13,33 @@
- name: Make network files - name: Make network files
template: template:
src: networkd/network.j2 src: network.j2
dest: "/etc/systemd/network/{{ item.key }}.network" dest: "/etc/systemd/network/{{ item.key }}.network"
mode: 0644 mode: 0644
loop: "{{ omni_networking | dict2items }}" loop: "{{ omni_networking | dict2items }}"
- name: Make netdev files - name: Make netdev files
template: template:
src: networkd/netdev.j2 src: netdev.j2
dest: "/etc/systemd/network/{{ item.0.key + '.' + item.1 }}.netdev" dest: "/etc/systemd/network/{{ item.0.key + '.' + item.1 }}.netdev"
mode: 0644 mode: 0644
loop: "{{ omni_networking | dict2items | subelements('value.vlans', true) }}" loop: "{{ omni_networking | dict2items | subelements('value.vlans', true) }}"
- import_tasks: services.yml
- name: Symlink so systemd-resolved uses /etc/resolv.conf
become: true
file:
dest: /etc/resolv.conf
src: /run/systemd/resolve/resolv.conf
state: link
force: true
setype: net_conf_t
- name: Symlink so /etc/resolv.conf uses systemd
become: true
file:
dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
src: /usr/lib/systemd/system/systemd-resolved.service
state: link
force: true

View File

@ -0,0 +1,38 @@
---
- name: Install networkd on Fedora
  when: ansible_distribution == "Fedora"
  become: true
  dnf:
    state: latest
    name:
      - systemd-resolved
      - systemd-networkd

- name: Install networkd on CentOS 7
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
  become: true
  yum:
    state: latest
    name:
      - systemd-resolved
      - systemd-networkd

- name: Install networkd on CentOS 8
  when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  become: true
  block:
    # The systemd-networkd EPEL package is currently in the testing phase, so we have
    # to enable the testing EPEL repo to install it. Note that this is also done in
    # the packages role
    # https://bugzilla.redhat.com/show_bug.cgi?id=1789146
    - name: Enable EPEL-Testing repository on CentOS 8
      lineinfile:
        path: /etc/yum.repos.d/epel-testing.repo
        regexp: "enabled=(0|1)"
        line: "enabled=1"
        insertbefore: "^$"
        firstmatch: true

    # NOTE(review): unlike the Fedora/CentOS 7 branches, systemd-resolved is
    # not installed here — confirm that omission is intentional
    - name: Install networkd
      dnf:
        state: latest
        name: systemd-networkd

View File

@ -0,0 +1,39 @@
---
- name: Disable NetworkManager
  become: true
  systemd:
    name: "{{ item }}"
    enabled: false
  loop:
    - NetworkManager
    - NetworkManager-wait-online

- name: Enable systemd-networkd
  become: true
  systemd:
    name: "{{ item }}"
    enabled: true
  loop:
    - systemd-networkd
    - systemd-resolved
    - systemd-networkd-wait-online

# Only interrupt running services when explicitly requested; '| bool' replaces
# the literal '== true' comparison and also handles string-typed extra-vars
- name: Stop NetworkManager
  when: omni_restart_services | bool
  become: true
  systemd:
    name: "{{ item }}"
    state: stopped
  loop:
    - NetworkManager
    - NetworkManager-wait-online

- name: Start systemd-networkd
  when: omni_restart_services | bool
  become: true
  systemd:
    name: "{{ item }}"
    state: started
  loop:
    - systemd-networkd
    - systemd-resolved

Some files were not shown because too many files have changed in this diff Show More