Overhaul playbook organizational structure
- provision playbooks now establish platform-related components of the macro system
- configure playbooks now configure/update/establish specific subcomponents of systems
- deploy playbooks will eventually deploy specific applications onto the platform
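Summary of the change: adds playbooks/configure-mgmt.yml, playbooks/configure-webproxy.yml, playbooks/initialize.yml, and playbooks/provision-common.yml, plus a playbooks/files symlink to the shared resources directory; reworks the environment and networking configure plays; removes the legacy auth/docker/deploy playbooks and the bundled bashrc and powerline resources.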
@@ -1,78 +0,0 @@
---
- name: Configure system authentication
  hosts: all
  roles:
    - role: sshd
  tasks:
    - import_tasks: tasks/preprocess-users.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ omni_local_targets + ['omni'] }}"

        - name: Load user passwords
          include_vars:
            file: secrets/passwords.yml

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            groups: "{{ item.targets | intersect(omni_local_targets) + ['omni'] }}"
            system: "{{ item.svc | default(false) }}"
            state: present
            generate_ssh_key: false
            password: "{{ omni_users_secrets[item.name] }}"
          loop: "{{ _users_local }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ _users_local_removed | default([]) | difference(omni_protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ _users_local_admin }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: 0644
        state: "{{ 'present' if omni_disable_sudo_password | default(false) | bool == true else 'absent' }}"
      loop: "{{ _users_local_admin }}"

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ _users_local }}"

    - import_tasks: tasks/deploy-ssh-keys.yml
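Illustration of the group mapping in "Create users" above; the filter chain is from the playbook, the values are hypothetical:

#   omni_local_targets: [servers, workstations]
#   item.targets:       [servers, vpn]
#   item.targets | intersect(omni_local_targets) + ['omni']  =>  ['servers', 'omni']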
@@ -1,69 +0,0 @@
---
- name: Configure docker hosts
  hosts: servers
  roles:
    - role: docker
  tasks:
    - name: Allow swarm traffic through the firewall
      become: true
      firewalld:
        state: enabled
        service: docker-swarm
        zone: public
        permanent: true
        immediate: true
    - name: Disable firewall on docker bridge interface
      become: true
      firewalld:
        state: enabled
        interface: docker0
        zone: trusted
        permanent: true
        immediate: true

- name: Configure swarm master
  hosts: jupiter
  tasks:
    - name: Configure portainer volume
      docker_volume:
        volume_name: portainer

    - name: Run portainer
      docker_container:
        name: omni.portainer
        image: portainer/portainer
        restart_policy: unless-stopped
        published_ports:
          - 0.0.0.0:8000:8000
          - 0.0.0.0:9000:9000
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
          - portainer:/data

    - name: Initialize swarm
      docker_swarm:
        state: present
        advertise_addr: "{{ omni_docker_swarm_iface }}"

    - name: Set swarm master to DRAIN
      docker_node:
        hostname: "{{ ansible_host }}"
        availability: drain

- name: Configure swarm nodes
  hosts:
    - remus
    - romulus
  tags: docker-nodes
  tasks:
    - name: Fetch docker swarm information
      delegate_to: jupiter
      docker_swarm_info:
      register: _swarm_info

    - name: Join workers to swarm
      docker_swarm:
        state: join
        remote_addrs: ["jupiter.svr.local"]
        join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
        advertise_addr: "{{ omni_docker_swarm_iface }}"
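The "Join workers to swarm" task above reads the token registered on the manager; a minimal sketch of the docker_swarm_info result shape it relies on (token values are placeholders):

#   _swarm_info:
#     swarm_facts:
#       JoinTokens:
#         Manager: "SWMTKN-1-..."
#         Worker: "SWMTKN-1-..."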
@@ -7,21 +7,83 @@
       hostname:
         name: "{{ ansible_host }}"

-    - import_tasks: tasks/preprocess-users.yml
+    - import_tasks: tasks/sshd/banner.yml

-    - name: Install network bash profile
+    - name: Install global bash components
       become: true
       copy:
-        src: bashrc.sh
-        dest: /home/{{ item.name }}/.bashrc
+        src: bash/{{ item }}.sh
+        dest: /etc/profile.d/Z-{{ 10 + loop_index }}-enpn-{{ item }}.sh
+        mode: 0644
-      loop: "{{ _users_local }}"
+      loop:
+        - global
+        - pyenv
+        - aliases
+        - helpers
+      loop_control:
+        index_var: loop_index
+        label: "{{ item }}"

+    - name: Disable dynamic MOTD
+      become: true
+      replace:
+        path: /etc/pam.d/sshd
+        regexp: "^session\\s+optional\\s+pam_motd\\.so.*$"
+        replace: "#session    optional     pam_motd.so"

+    - name: Remove legacy global bashrc
+      become: true
+      file:
+        path: /etc/profile.d/ZA-enpn-bashrc.sh
+        state: absent

     - name: Disable case-sensitive autocomplete
       become: true
       lineinfile:
-        path: /home/{{ item.name }}/.inputrc
-        line: set completion-ignore-case On
+        path: /etc/inputrc
+        line: set completion-ignore-case ((o|O)(n|ff))
+        create: true
+        mode: 0644
-      loop: "{{ _users_local }}"

+- name: Configure additional security settings on shared servers
+  hosts: servers
+  tasks:
+    - name: Identify local home directories
+      become: true
+      find:
+        file_type: directory
+        path: /home/
+        recurse: false
+      register: _local_home_dirs

+    - name: Determine files to write-protect
+      set_fact:
+        _secure_files: >-
+          {{ _secure_files | default([]) + [
+            item.path ~ '/.bashrc',
+            item.path ~ '/.bash_profile',
+            item.path ~ '/.ssh/authorized_keys',
+            item.path ~ '/.ssh/config'
+          ] }}
+      loop: "{{ _local_home_dirs.files }}"
+      loop_control:
+        label: "{{ item.path }}"

+    - name: Fetch status of secure files
+      become: true
+      stat:
+        path: "{{ item }}"
+      loop: "{{ _secure_files }}"
+      loop_control:
+        label: "{{ item }}"
+      register: _secure_file_stats

+    - name: Restrict access to secure files
+      become: true
+      file:
+        path: "{{ item.item }}"
+        state: "{{ 'file' if item.stat.exists else 'touch' }}"
+        mode: 0400
+      loop: "{{ _secure_file_stats.results }}"
+      loop_control:
+        label: "Write-protecting: {{ item.item }}"
playbooks/configure-mgmt.yml | 164 lines (new file)
@@ -0,0 +1,164 @@
---
- name: Configure server management services
  hosts: servers
  tasks:
    - import_tasks: tasks/sshd/secure.yml

    - name: Enable cockpit
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      become: true
      systemd:
        name: cockpit.socket
        enabled: true
        state: started

- name: Configure virtualization management services
  hosts: virtualization
  tasks:
    - name: Create docker group
      become: true
      group:
        name: docker
        state: present

- name: Configure local accounts
  hosts: all
  vars_files:
    - vars/accounts.yml
    - vars/secrets/passwords.yml
    - vars/sshkeys.yml
  tasks:
    - name: Create omni group
      become: true
      group:
        name: "{{ omni_group.name }}"
        gid: "{{ omni_group.gid }}"
        state: present

    - name: Determine existing omni users
      changed_when: false
      shell:
        cmd: 'grep omni /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
      register: _existing_omni_users

    - name: Delete removed user accounts
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"

    - name: Delete removed user groups
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      group:
        name: "{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"

    - name: Delete removed user home directories
      become: true
      when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
      file:
        path: "/home/{{ item }}"
        state: absent
      loop: "{{ _existing_omni_users.stdout_lines }}"
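The repeated 'when' guard above checks each existing account against a name-to-uid mapping; a sketch of what the items2dict filter yields, assuming hypothetical entries:

#   omni_users:
#     - {name: jdoe, uid: 1200}
#     - {name: asmith, uid: 1201}
#   omni_users | items2dict(key_name='name', value_name='uid')
#     =>  {'jdoe': 1200, 'asmith': 1201}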
    - name: Create account groups
      become: true
      group:
        name: "{{ item.name }}"
        gid: "{{ item.uid }}"
        state: present
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Create accounts
      become: true
      user:
        name: "{{ item.name }}"
        state: present
        uid: "{{ item.uid }}"
        group: "{{ item.name }}"
        groups: >-
          {{
            [omni_group.name] +
            (['wheel' if ansible_os_family | lower == 'redhat' else 'sudo'] if item.admin | default(false) else []) +
            (['docker' if 'virtualization' in group_names else omni_group.name] if item.admin | default(false) else [])
          }}
        # The 'else omni_group.name' above is just some non-breaking value to cover the
        # false condition, it doesn't have special meaning
        comment: "{{ item.fullname | default('') }}"
        shell: "{{ '/bin/bash' if 'mgmt' in item.targets else '/bin/false' }}"
        system: "{{ item.svc | default(false) }}"
        generate_ssh_key: false
        password: "{{ omni_users_secrets[item.name] | default(none) }}"
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        path: /home/{{ item.name }}
        state: directory
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        mode: 0700
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Enforce root password
      become: true
      user:
        name: root
        password: "{{ omni_users_secrets.root }}"
        state: present

    - name: Create SSH directory
      become: true
      file:
        path: /home/{{ item.name }}/.ssh
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
        state: directory
        mode: 0755
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Update authorized keys
      become: true
      when: "'mgmt' in item.targets"
      authorized_key:
        user: "{{ item.name }}"
        key: "{{ omni_ssh_keys[item.name] | join('\n') }}"
        state: present
        exclusive: true
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"

    - name: Enforce ownership of authorized keys
      become: true
      when: "'mgmt' in item.targets"
      file:
        path: /home/{{ item.name }}/.ssh/authorized_keys
        state: file
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
        mode: 0400
      loop: "{{ omni_users }}"
      loop_control:
        label: "{{ item.uid }},{{ item.name }}"
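How the 'groups' concatenation in "Create accounts" above resolves, assuming a hypothetical admin user on a host in the 'virtualization' inventory group:

#   [omni_group.name]            =>  ['omni']
#   admin, RedHat family         =>  + ['wheel']  (otherwise ['sudo'])
#   admin, virtualization host   =>  + ['docker'] (otherwise the no-op omni group)
#   result                       =>  ['omni', 'wheel', 'docker']

For a non-admin user both conditional terms collapse to empty lists and only the omni group is assigned.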
@@ -1,35 +1,34 @@
 ---
-- name: Configure router
-  hosts: router
-  gather_facts: false
-  pre_tasks:
-    - name: Collect EdgeOS facts
-      edgeos_facts:
-        gather_subset: "!config"
-  tasks:
-    - name: Configure interfaces
-      edgeos_config:
-        lines:
-          - set interfaces ethernet eth0 address dhcp
-          - set interfaces ethernet eth0 description EXTERNAL
-          - set interfaces ethernet eth1 address 10.42.100.1/24
-          - set interfaces ethernet eth1 address 10.42.99.1/24
-          - set interfaces ethernet eth1 description LOCAL
-          - set interfaces ethernet eth2 address 10.42.101.1/24
-          - set interfaces ethernet eth2 description DOMAIN
+# - name: Configure router
+#   hosts: router
+#   gather_facts: false
+#   pre_tasks:
+#     - name: Collect EdgeOS facts
+#       edgeos_facts:
+#         gather_subset: "!config"
+#   tasks:
+#     - name: Configure interfaces
+#       edgeos_config:
+#         lines:
+#           - set interfaces ethernet eth0 address dhcp
+#           - set interfaces ethernet eth0 description EXTERNAL
+#           - set interfaces ethernet eth1 address 10.42.100.1/24
+#           - set interfaces ethernet eth1 address 10.42.99.1/24
+#           - set interfaces ethernet eth1 description LOCAL
+#           - set interfaces ethernet eth2 address 10.42.101.1/24
+#           - set interfaces ethernet eth2 description DOMAIN

-- name: Configure servers
+- name: Configure server networking
   hosts: servers
-  roles:
-    - role: networkd
   tasks:
+    - import_tasks: tasks/networkd/install.yml
+    - import_tasks: tasks/networkd/configure.yml
+    - import_tasks: tasks/networkd/services.yml

     - name: Configure local hostsfile
       become: true
       lineinfile:
         path: /etc/hosts
         state: present
-        line: "{{ item }}"
-      loop:
-        - "192.168.42.10  jupiter.svr.local"
-        - "192.168.42.20  remus.svr.local"
-        - "192.168.42.30  romulus.svr.local"
+        line: "{{ item.ip }}  {{ item.hostname }}"
+      loop: "{{ omni_local_hosts | default([]) }}"
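The hostsfile lines now come from inventory instead of the hard-coded list; the expected omni_local_hosts structure, reusing the addresses from the removed lines:

#   omni_local_hosts:
#     - {ip: 192.168.42.10, hostname: jupiter.svr.local}
#     - {ip: 192.168.42.20, hostname: remus.svr.local}
#     - {ip: 192.168.42.30, hostname: romulus.svr.local}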
 
playbooks/configure-webproxy.yml | 37 lines (new file)
@@ -0,0 +1,37 @@
---
# TBW

# - name: Install Nginx
#   hosts: jupiter
#   handlers:
#     - name: restart_nginx
#       become: true
#       systemd:
#         name: nginx
#         state: restarted
#   tasks:
#     - name: Install nginx and certbot
#       become: true
#       dnf:
#         name:
#           - nginx
#           - certbot
#           - python3-certbot-nginx
#         state: present
#
#     - name: Enable and start nginx
#       become: true
#       systemd:
#         name: nginx
#         state: started
#         enabled: true
#
#     - name: Install configuration
#       become: true
#       copy:
#         src: nginx.conf
#         dest: /etc/nginx/nginx.conf
#       notify:
#         - restart_nginx
#
# # sudo setsebool -P httpd_can_network_connect on
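On the trailing note above: 'setsebool -P httpd_can_network_connect on' flips the SELinux boolean that lets nginx (and other httpd_t processes) open outbound network connections, which it needs to act as a reverse proxy; the -P flag persists the setting across reboots.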
@@ -1,32 +0,0 @@
---
- import_playbook: meta.yml

- name: Configure system settings
  hosts: all
  pre_tasks:
    - import_tasks: tasks/centos-8-kernelplus.yml
      tags: kernel
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
  roles:
    - role: packages
      vars:
        omni_pkg_clean: true
    - role: sshd
      vars:
        omni_restart_services: true
  tasks:
    - name: Enable cockpit
      become: true
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      systemd:
        name: cockpit
        enabled: true
        state: started

- import_playbook: configure-auth.yml

- import_playbook: configure-env.yml

- import_playbook: configure-network.yml

- import_playbook: configure-docker.yml
@@ -1,67 +0,0 @@
---
- hosts: nimbus-1.net.enp.one
  name: Deploy documentation
  vars:
    # Local directory to use for cloning and building the documentation site
    DIR_BUILD: /tmp/docs
    # Remote directory to install the site at
    DIR_DEPLOY: /usr/share/nginx/doc.enp.one/html
  tasks:
    - name: Build the static site locally
      delegate_to: 127.0.0.1
      block:
        - name: Ensure the build directory does not exist
          file:
            path: "{{ DIR_BUILD }}"
            state: absent
        - name: Clone documentation repository
          git:
            repo: git@vcs.enp.one:omni/omni-docs.git
            dest: "{{ DIR_BUILD }}/"
        - name: Generate build env requirements file
          # Generate the requirements.txt style format, pipe through grep to remove
          # the index line (not sure why that's included at all) and save the
          # result in "requirements.txt" for use with pip
          shell: pipenv lock --requirements | grep --invert-match "\-i" > requirements.txt
          args:
            chdir: "{{ DIR_BUILD }}/"
        - name: Create build env and install requirements
          pip:
            requirements: "{{ DIR_BUILD }}/requirements.txt"
            virtualenv: "{{ DIR_BUILD }}/venv"
            virtualenv_python: python3
            state: present
        - name: Build the static site using mkdocs
          shell: "{{ DIR_BUILD }}/venv/bin/mkdocs build"
          args:
            chdir: "{{ DIR_BUILD }}"

    - name: Upload static site to remote
      copy:
        src: "{{ DIR_BUILD }}/site/"
        dest: "/tmp/docs/"
    - name: Remove legacy site
      become: true
      file:
        path: "{{ DIR_DEPLOY }}"
        state: absent
    - name: Copy static site to deployment directory
      become: true
      copy:
        src: "/tmp/docs/"
        dest: "{{ DIR_DEPLOY }}"
        remote_src: true
        owner: root
        group: nginx
        mode: 0755
        setype: httpd_sys_content_t

    - name: Clean up local build directory
      delegate_to: 127.0.0.1
      file:
        path: "{{ DIR_BUILD }}"
        state: absent
    - name: Clean up remote temp directory
      file:
        path: /tmp/docs
        state: absent
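For context on the build step above: 'pipenv lock --requirements' emits the locked dependency set in requirements.txt format, including a leading index line ('-i <index url>'); the grep strips that line before the file is handed to pip.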
@@ -1,38 +0,0 @@
---
- hosts: nimbus-1.net.enp.one
  name: Deploy main landing page at enpaul.net
  vars:
    # Local directory to use for cloning and building the documentation site
    DIR_BUILD: /tmp/docs
    # Remote directory to install the site at
    DIR_DEPLOY: /usr/share/nginx/enpaul.net/html
  tasks:
    - name: Upload static site to remote
      copy:
        src: "{{ DIR_BUILD }}/site/"
        dest: "/tmp/docs/"
    - name: Remove legacy site
      become: true
      file:
        path: "{{ DIR_DEPLOY }}"
        state: absent
    - name: Copy static site to deployment directory
      become: true
      copy:
        src: "/tmp/docs/"
        dest: "{{ DIR_DEPLOY }}"
        remote_src: true
        owner: root
        group: nginx
        mode: 0755
        setype: httpd_sys_content_t

    - name: Clean up local build directory
      delegate_to: 127.0.0.1
      file:
        path: "{{ DIR_BUILD }}"
        state: absent
    - name: Clean up remote temp directory
      file:
        path: /tmp/docs
        state: absent
@@ -1,78 +0,0 @@
---
- name: Configure local users
  hosts: all:!network
  tags:
    - auth
    - ssh
    - users
  tasks:
    - import_tasks: tasks/preprocess-local-users.yml

    - name: Create local user accounts
      tags: users_create
      become: true
      block:
        - name: Create groups
          group:
            name: "{{ item }}"
            state: present
          loop: "{{ local_targets + ['omni'] }}"

        - name: Load user passwords
          include_vars:
            file: secrets/passwords.yml

        - name: Create users
          user:
            name: "{{ item.name }}"
            comment: "{{ item.fullname | default('') }}"
            shell: /bin/bash
            groups: "{{ item.targets | intersect(local_targets) + ['omni'] }}"
            system: "{{ item.svc | default(False) }}"
            state: present
            generate_ssh_key: false
            password: "{{ users_secrets[item.name] }}"
          loop: "{{ users_local }}"

    - name: Delete removed user accounts
      become: true
      user:
        name: "{{ item }}"
        state: absent
      loop: "{{ users_local_removed | default([]) | difference(protected_users) }}"

    - name: Grant sudo permissions to admin user accounts
      become: true
      user:
        name: "{{ item.name }}"
        groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
        state: present
      loop: "{{ users_local_admin }}"

    - name: Disable sudo password for ansible
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/30-ansible
        line: "ansible ALL=(ALL) NOPASSWD:ALL"
        mode: 0644

    - name: Disable sudo password for admin users
      become: true
      lineinfile:
        create: true
        path: /etc/sudoers.d/40-admin
        line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
        mode: 0644
        state: "{{ 'present' if disable_sudo_password | bool == true else 'absent' }}"
      loop: "{{ users_local_admin }}"

    - name: Ensure proper ownership of user home directories
      become: true
      file:
        group: "{{ item.name }}"
        owner: "{{ item.name }}"
        path: /home/{{ item.name }}
        recurse: true
        state: directory
      loop: "{{ users_local }}"
@@ -1,33 +0,0 @@
---
- name: Deploy minecraft container
  hosts: remus
  tasks:
    - name: Create world volume
      docker_volume:
        name: minecraft
        driver: local
        state: present
        recreate: never

    - name: Launch minecraft server container
      docker_container:
        name: mcs
        state: started
        image: itzg/minecraft-server
        recreate: "{{ omni_update_minecraft | default(false) | bool }}"
        volumes:
          - minecraft:/data
        published_ports:
          - "25565:25565/tcp"
        env:
          EULA: "TRUE"
          VERSION: 1.15.2
          MAX_MEMORY: "8G"
          MOTD: "A home for buttery companions"
          MODE: survival
          OPS: ScifiGeek42
          WHITELIST: "ScifiGeek42,fantasycat256,CoffeePug,Snowdude21325,KaiserSJR,glutenfreebean"
          MAX_BUILD_HEIGHT: "512"
          SNOOPER_ENABLED: "false"
          ICON: https://cdn.enp.one/img/logos/e-w-sm.png
          ENABLE_RCON: "false"
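A note on the quoting in the env block above: docker_container expects environment values to be strings, which is presumably why booleans and numbers (EULA, MAX_BUILD_HEIGHT, SNOOPER_ENABLED) are passed quoted.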
@@ -1,44 +0,0 @@
---
- name: Deploy plex container
  hosts: remus
  tasks:
    - name: Create plex metadata volume
      docker_volume:
        name: plexmeta
        driver: local
        state: present
        recreate: never

    - name: Create plex NFS media volume
      docker_volume:
        name: plexdata
        driver: local
        state: present
        recreate: never
        driver_options:
          type: nfs
          o: "addr=plexistore.tre2.local,ro"
          device: ":/nfs/plex"

    - name: Allow plex access through the firewall
      become: true
      firewalld:
        state: enabled
        service: plex
        permanent: true
        immediate: true

    - name: Launch plex container
      docker_container:
        name: pms
        state: started
        image: plexinc/pms-docker:latest
        pull: true
        recreate: "{{ omni_update_plex | default(false) | bool }}"
        network_mode: host
        volumes:
          - plexmeta:/config
          - plexdata:/data:ro
        env:
          TZ: America/New_York
          ALLOWED_NETWORKS: 10.42.100.0/24,10.42.101.0/24
@@ -1,22 +0,0 @@
---
- name: Update ssh keys on all devices
  hosts: all
  tasks:
    - import_tasks: tasks/preprocess-local-users.yml

    - name: Ensure SSH directory exists
      become: true
      file:
        state: directory
        path: /home/{{ item.name }}/.ssh
      loop: "{{ users_local }}"

    - name: Put keys on remote
      become: true
      when: item.sshkeys != []
      authorized_key:
        user: "{{ item.name }}"
        key: "{{ item.sshkeys | join('\n') }}"
        state: present
        exclusive: yes
      loop: "{{ users_local }}"
playbooks/files | 1 line (new symbolic link)
@@ -0,0 +1 @@
../resources
@@ -1,63 +0,0 @@
# Global network bashrc/profile file
# Updated 2020-03-18

function parse_git_branch() {
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]$(parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "

function venv() {
        DIR="/home/$USERNAME/.venvs"

        if [ $# -eq 0 ]; then
                echo "No command specified"
        elif [ $1 = "--help" ] || [ $1 = '-h' ]; then
                echo "Custom python Virtualenv manager
\"Because pipenv is too hard and everything else sucks\"

Commands:
  list                 List available virtualenvs
  show                 Alias of list
  delete <venv>        Delete a virtualenv
  del                  Alias of delete
  rm                   Alias of delete
  load <venv>          Activate a virtualenv for usage
  new <venv> <python>  Create a new virtualenv. If <python> is not specified,
                       then the system default python is used
"
        elif [ $1 = "list" ] || [ $1 = "show" ] || [ $1 = "ls" ]; then
                ls $DIR
        elif [ $1 = "load" ]; then
                . $DIR/$2/bin/activate
        elif [ $1 = "new" ]; then
                virtualenv $DIR/$2 --python=$3
        elif [ $1 = "delete" ] || [ $1 = "del" ] || [ $1 = "rm" ]; then
                rm -rf $DIR/$2
        elif [ $1 = "go" ]; then
                cd $DIR/$2
        fi
}

function up() { cd $(eval printf '../'%.0s {1..$1}); }

function pipin() { pip freeze | grep $1; }

alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias mklink='ln -s'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gg='cd ~/Git'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"

export rc=/home/$USERNAME/.bashrc
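For reference, usage of the helpers removed above: 'venv new myenv python3' creates a virtualenv under ~/.venvs (the name here is hypothetical), 'venv load myenv' sources its activate script, and 'up 3' expands to 'cd ../../../' via the printf loop.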
@@ -1,12 +0,0 @@
alias powerline='/opt/powerline/bin/powerline'
alias powerline-config='/opt/powerline/bin/powerline-config'
alias powerline-daemon='/opt/powerline/bin/powerline-daemon'
alias powerline-lint='/opt/powerline/bin/powerline-lint'
alias powerline-render='/opt/powerline/bin/powerline-render'

if [ -z ${DISABLE_POWERLINE} ]; then
        powerline-daemon -q
        POWERLINE_BASH_CONTINUATION=1
        POWERLINE_BASH_SELECT=1
        source /opt/powerline/powerline.sh
fi
@@ -1,153 +0,0 @@
_powerline_columns_fallback() {
	if which stty &>/dev/null ; then
		local cols="$(stty size 2>/dev/null)"
		if ! test -z "$cols" ; then
			echo "${cols#* }"
			return 0
		fi
	fi
	echo 0
	return 0
}

_powerline_tmux_pane() {
	echo "${TMUX_PANE:-`TMUX="$_POWERLINE_TMUX" tmux display -p "#D"`}" | \
		tr -d ' %'
}

_powerline_tmux_setenv() {
	TMUX="$_POWERLINE_TMUX" tmux setenv -g TMUX_"$1"_`_powerline_tmux_pane` "$2"
	TMUX="$_POWERLINE_TMUX" tmux refresh -S
}

_powerline_tmux_set_pwd() {
	if test "$_POWERLINE_SAVED_PWD" != "$PWD" ; then
		_POWERLINE_SAVED_PWD="$PWD"
		_powerline_tmux_setenv PWD "$PWD"
	fi
}

_powerline_return() {
	return $1
}

_POWERLINE_HAS_PIPESTATUS="$(
	_powerline_return 0 | _powerline_return 43
	test "${PIPESTATUS[*]}" = "0 43"
	echo "$?"
)"

_powerline_has_pipestatus() {
	return $_POWERLINE_HAS_PIPESTATUS
}

_powerline_status_wrapper() {
	local last_exit_code=$? last_pipe_status=( "${PIPESTATUS[@]}" )

	if ! _powerline_has_pipestatus \
	   || test "${#last_pipe_status[@]}" -eq "0" \
	   || test "$last_exit_code" != "${last_pipe_status[$(( ${#last_pipe_status[@]} - 1 ))]}" ; then
		last_pipe_status=()
	fi
	"$@" $last_exit_code "${last_pipe_status[*]}"
	return $last_exit_code
}

_powerline_add_status_wrapped_command() {
	local action="$1" ; shift
	local cmd="$1" ; shift
	full_cmd="_powerline_status_wrapper $cmd"
	if test "$action" = "append" ; then
		PROMPT_COMMAND="$PROMPT_COMMAND"$'\n'"$full_cmd"
	else
		PROMPT_COMMAND="$full_cmd"$'\n'"$PROMPT_COMMAND"
	fi
}

_powerline_tmux_set_columns() {
	_powerline_tmux_setenv COLUMNS "${COLUMNS:-`_powerline_columns_fallback`}"
}

_powerline_init_tmux_support() {
	if test -n "$TMUX" && tmux refresh -S &>/dev/null ; then
		# TMUX variable may be unset to create new tmux session inside this one
		_POWERLINE_TMUX="$TMUX"

		trap '_powerline_tmux_set_columns' WINCH
		_powerline_tmux_set_columns

		test "$PROMPT_COMMAND" != "${PROMPT_COMMAND/_powerline_tmux_set_pwd}" \
			|| _powerline_add_status_wrapped_command append _powerline_tmux_set_pwd
	fi
}

_powerline_local_prompt() {
	# Arguments:
	# 1: side
	# 2: renderer_module arg
	# 3: last_exit_code
	# 4: last_pipe_status
	# 5: jobnum
	# 6: local theme
	"$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
		$2 \
		--last-exit-code=$3 \
		--last-pipe-status="$4" \
		--jobnum=$5 \
		--renderer-arg="client_id=$$" \
		--renderer-arg="local_theme=$6"
}

_powerline_prompt() {
	# Arguments:
	# 1: side
	# 2: last_exit_code
	# 3: last_pipe_status
	# 4: jobnum
	"$POWERLINE_COMMAND" $POWERLINE_COMMAND_ARGS shell $1 \
		--width="${COLUMNS:-$(_powerline_columns_fallback)}" \
		-r.bash \
		--last-exit-code=$2 \
		--last-pipe-status="$3" \
		--jobnum=$4 \
		--renderer-arg="client_id=$$"
}

_powerline_set_prompt() {
	local last_exit_code=$1 ; shift
	local last_pipe_status=$1 ; shift
	local jobnum="$(jobs -p|wc -l)"
	PS1="$(_powerline_prompt aboveleft $last_exit_code "$last_pipe_status" $jobnum)"
	if test -n "$POWERLINE_SHELL_CONTINUATION$POWERLINE_BASH_CONTINUATION" ; then
		PS2="$(_powerline_local_prompt left -r.bash $last_exit_code "$last_pipe_status" $jobnum continuation)"
	fi
	if test -n "$POWERLINE_SHELL_SELECT$POWERLINE_BASH_SELECT" ; then
		PS3="$(_powerline_local_prompt left '' $last_exit_code "$last_pipe_status" $jobnum select)"
	fi
}

_powerline_setup_prompt() {
	VIRTUAL_ENV_DISABLE_PROMPT=1
	if test -z "${POWERLINE_COMMAND}" ; then
		POWERLINE_COMMAND="$("$POWERLINE_CONFIG_COMMAND" shell command)"
	fi
	test "$PROMPT_COMMAND" != "${PROMPT_COMMAND%_powerline_set_prompt*}" \
		|| _powerline_add_status_wrapped_command prepend _powerline_set_prompt
	PS2="$(_powerline_local_prompt left -r.bash 0 0 0 continuation)"
	PS3="$(_powerline_local_prompt left '' 0 0 0 select)"
}

if test -z "${POWERLINE_CONFIG_COMMAND}" ; then
	if which powerline-config >/dev/null ; then
		POWERLINE_CONFIG_COMMAND=powerline-config
	else
		POWERLINE_CONFIG_COMMAND="$(dirname "$BASH_SOURCE")/../../../scripts/powerline-config"
	fi
fi

if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses prompt ; then
	_powerline_setup_prompt
fi
if "${POWERLINE_CONFIG_COMMAND}" shell --shell=bash uses tmux ; then
	_powerline_init_tmux_support
fi
@@ -1,53 +0,0 @@
{
	"common": {
		"term_truecolor": false
	},
	"ext": {
		"ipython": {
			"colorscheme": "default",
			"theme": "in",
			"local_themes": {
				"rewrite": "rewrite",
				"out": "out",
				"in2": "in2"
			}
		},
		"pdb": {
			"colorscheme": "default",
			"theme": "default"
		},
		"shell": {
			"colorscheme": "default",
			"theme": "default_leftonly",
			"local_themes": {
				"continuation": "continuation",
				"select": "select"
			}
		},
		"tmux": {
			"colorscheme": "default",
			"theme": "default"
		},
		"vim": {
			"colorscheme": "default",
			"theme": "default",
			"local_themes": {
				"__tabline__": "tabline",

				"cmdwin": "cmdwin",
				"help": "help",
				"quickfix": "quickfix",

				"powerline.matchers.vim.plugin.nerdtree.nerdtree": "plugin_nerdtree",
				"powerline.matchers.vim.plugin.commandt.commandt": "plugin_commandt",
				"powerline.matchers.vim.plugin.gundo.gundo": "plugin_gundo",
				"powerline.matchers.vim.plugin.gundo.gundo_preview": "plugin_gundo-preview"
			}
		},
		"wm": {
			"colorscheme": "default",
			"theme": "default",
			"update_interval": 2
		}
	}
}
playbooks/initialize.yml | 128 lines (new file)
@@ -0,0 +1,128 @@
---
- name: Bootstrap remote ansible environment
  hosts: all
  tags:
    - always
  vars:
    # Set this fact to allow the bootstrap play to run using the native system python
    # interpreter. A variable defined here is only in scope while this specific play
    # is being run; once this play is done this value is dropped and the default value
    # (which is actually set in the inventory file to the interpreter created by this
    # play) will be used.
    ansible_python_interpreter: /usr/bin/python3
  tasks:
    - name: Determine runtime settings
      set_fact:
        _runtime_clean: "{{ true if (clean | bool) else false }}"
        _runtime_update: "{{ true if (update | bool) else false }}"
        _runtime_update_state: "{{ 'latest' if (update | bool) else 'present' }}"

    - name: Clean bootstrap virtualenv
      when: _runtime_clean
      become: true
      file:
        path: "{{ omni_ansible_venv }}"
        state: absent

    - name: Create bootstrap virtualenv directory
      become: true
      file:
        path: "{{ omni_ansible_venv }}"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0755

    - name: Create bootstrap virtualenv
      command:
        cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
        creates: "{{ omni_ansible_venv }}/bin/python"

    # - name: Assign ownership of the virtualenv to ansible
    #   become: true
    #   file:
    #     path: "{{ omni_ansible_venv }}"
    #     state: directory
    #     owner: "{{ ansible_user }}"
    #     group: "{{ ansible_user }}"
    #     mode: 0755
    #     follow: false

    - name: Generate remote requirements file locally
      delegate_to: 127.0.0.1
      command:
        cmd: poetry export --format requirements.txt
      changed_when: false
      register: _poetry_requirements

    - name: Copy remote requirements file
      blockinfile:
        path: "{{ omni_ansible_venv }}/req.txt"
        create: true
        block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
        mode: 0644

    - name: Install remote requirements
      pip:
        executable: "{{ omni_ansible_venv }}/bin/pip"
        requirements: "{{ omni_ansible_venv }}/req.txt"
        state: present

    - name: Install CentOS 8 python bindings
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
      become: true
      dnf:
        state: "{{ _runtime_update_state }}"
        name:
          - python3-libselinux
          - python3-policycoreutils
          - python3-firewall

    - name: Install CentOS 7 python bindings
      when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
      become: true
      yum:
        state: "{{ _runtime_update_state }}"
        name:
          - libselinux-python
          - policycoreutils-python
          - python-firewall

    - name: Install Fedora python bindings
      when: ansible_distribution == "Fedora"
      become: true
      dnf:
        state: "{{ _runtime_update_state }}"
        name:
          - libselinux-python
          - policycoreutils-python
          - python3-firewall


- name: Check meta environment
  hosts: all
  tags:
    - always
  tasks:
    - name: Check required operating system
      when: omni_os is defined
      assert:
        that:
          - omni_os.name == ansible_distribution | lower
          - omni_os.version_major == ansible_distribution_major_version
        fail_msg: >-
          Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}',
          expected '{{ omni_os.name }} {{ omni_os.version_major }}'
        success_msg: >-
          Remote is running expected OS '{{ ansible_distribution }}
          {{ ansible_distribution_major_version }}'

    - name: Check required interpreter settings
      assert:
        that:
          - ansible_python_interpreter.startswith(omni_ansible_venv) is true
        fail_msg: >-
          Interpreter '{{ ansible_python_interpreter }}'
          is not in the expected venv '{{ omni_ansible_venv }}'
        success_msg: Interpreter '{{ ansible_python_interpreter }}' is in the expected venv
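A sketch of the inventory variables this bootstrap flow assumes; the two variable names are referenced above, the paths are hypothetical:

#   all:
#     vars:
#       omni_ansible_venv: /opt/ansible-venv
#       ansible_python_interpreter: /opt/ansible-venv/bin/python

The clean/update switches read by "Determine runtime settings" would be supplied as extra vars, e.g. 'ansible-playbook initialize.yml -e clean=true -e update=false'.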
@@ -1,72 +0,0 @@
 | 
			
		||||
---
 | 
			
		||||
- name: Bootstrap remote ansible environment
 | 
			
		||||
  hosts: all
 | 
			
		||||
  gather_facts: false
 | 
			
		||||
  become: true
 | 
			
		||||
  tags:
 | 
			
		||||
    - always
 | 
			
		||||
    - meta
 | 
			
		||||
  vars:
 | 
			
		||||
    ansible_python_interpreter: /usr/bin/python3
 | 
			
		||||
  tasks:
 | 
			
		||||
    - name: Clean bootstrap virtualenv
 | 
			
		||||
      when: omni_force_reinstall is defined
 | 
			
		||||
      file:
 | 
			
		||||
        path: "{{ omni_ansible_venv }}"
 | 
			
		||||
        state: absent
 | 
			
		||||
 | 
			
		||||
    - name: Create bootstrap virtualenv
 | 
			
		||||
      command:
 | 
			
		||||
        cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
 | 
			
		||||
        creates: "{{ omni_ansible_venv }}/bin/python"
 | 
			
		||||
 | 
			
		||||
    - name: Generate remote requirements file locally
 | 
			
		||||
      become: false
 | 
			
		||||
      delegate_to: 127.0.0.1
 | 
			
		||||
      command:
 | 
			
		||||
        cmd: poetry export --format requirements.txt
 | 
			
		||||
      changed_when: false
 | 
			
		||||
      register: _poetry_requirements
 | 
			
		||||
 | 
			
		||||
    - name: Copy remote requirements file
 | 
			
		||||
      blockinfile:
 | 
			
		||||
        path: "{{ omni_ansible_venv }}/req.txt"
 | 
			
		||||
        create: true
 | 
			
		||||
        block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
 | 
			
		||||
 | 
			
		||||
    - name: Install remote requirements
 | 
			
		||||
      pip:
 | 
			
		||||
        executable: "{{ omni_ansible_venv }}/bin/pip"
 | 
			
		||||
        requirements: "{{ omni_ansible_venv }}/req.txt"
 | 
			
		||||
        state: present
 | 
			
		||||
 | 
			
		||||
    - name: Assign ownership of the virtualenv to ansible
 | 
			
		||||
      file:
 | 
			
		||||
        path: "{{ omni_ansible_venv }}"
 | 
			
		||||
        state: directory
 | 
			
		||||
        owner: ansible
 | 
			
		||||
        group: ansible
 | 
			
		||||
        recurse: true
 | 
			
		||||
        follow: false
 | 
			
		||||
 | 
			
		||||
- name: Check meta environment
 | 
			
		||||
  hosts: all
 | 
			
		||||
  tags:
 | 
			
		||||
    - always
 | 
			
		||||
    - meta
 | 
			
		||||
  tasks:
 | 
			
		||||
    - name: Check required operating system
 | 
			
		||||
      when: omni_os is defined
 | 
			
		||||
      assert:
 | 
			
		||||
        that:
 | 
			
		||||
          - omni_os.name == ansible_distribution | lower
 | 
			
		||||
          - omni_os.version_major == ansible_distribution_major_version
 | 
			
		||||
        fail_msg: "Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}', expected '{{ omni_os.name }} {{ omni_os.version_major }}'"
 | 
			
		||||
        success_msg: "Remote is running expected OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}'"
 | 
			
		||||
 | 
			
		||||
    - name: Check required interpreter settings
 | 
			
		||||
      assert:
 | 
			
		||||
        that:
 | 
			
		||||
          - ansible_python_interpreter.startswith(omni_ansible_venv) is true
 | 
			
		||||
        fail_msg: "Interpreter '{{ ansible_python_interpreter }}' is not in the expected venv '{{ omni_ansible_venv }}'"
 | 
			
		||||
        success_msg: "Interpreter '{{ ansible_python_interpreter }}' is in the expected venv"
 | 
			
		||||
							
								
								
									
										29
									
								
								playbooks/provision-common.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								playbooks/provision-common.yml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,29 @@
---
- import_playbook: initialize.yml


- name: Configure system settings
  hosts: all
  vars_files:
    - vars/packages.yml
  pre_tasks:
    - import_tasks: tasks/centos-8-kernelplus.yml
  tasks:
    - import_tasks: tasks/packages/clean.yml
      when: _runtime_clean is true

    - import_tasks: tasks/packages/repos.yml

    - import_tasks: tasks/packages/update.yml
      when: _runtime_update is true

    - import_tasks: tasks/packages/install.yml


- import_playbook: configure-network.yml


- import_playbook: configure-mgmt.yml


- import_playbook: configure-env.yml
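The _runtime_clean and _runtime_update switches (and the _runtime_update_state used by the package tasks earlier in this diff) are defined elsewhere, presumably in initialize.yml, which this diff does not show; a minimal sketch of how such flags could be derived, where the `clean`/`update` extra-var names are assumptions:

# hypothetical play for initialize.yml; only the _runtime_* names come from this diff
- name: Compute runtime package-management flags
  hosts: all
  gather_facts: false
  tasks:
    - name: Derive runtime flags from extra vars
      set_fact:
        _runtime_clean: "{{ clean | default(false) | bool }}"
        _runtime_update: "{{ update | default(false) | bool }}"
        _runtime_update_state: "{{ 'latest' if update | default(false) | bool else 'present' }}"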
playbooks/provision-datastore.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
# TBW

playbooks/provision-swarm.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
---
# TBW

# - import_playbook: provision-common.yml
#
#
# - name: Install and start Docker
#   hosts: virtualization
#   tasks:
#     - import_tasks: tasks/docker/install.yml
#
#     - name: Start and enable docker service
#       become: true
#       systemd:
#         name: docker
#         state: started
#         enabled: yes
#
#     - name: Allow swarm traffic through the firewall
#       become: true
#       firewalld:
#         zone: trusted
#         interface: "{{ item.key }}"
#         permanent: true
#         state: enabled
#
#
# - name: Configure swarm master
#   hosts: "{{ omni_docker_swarm_manager }}"
#   tasks:
#     - name: Initialize swarm
#       docker_swarm:
#         state: present
#         advertise_addr: "{{ omni_docker_swarm_iface }}"
#
#     - name: Set swarm master to DRAIN
#       docker_node:
#         hostname: "{{ ansible_host }}"
#         availability: drain
#
# - name: Configure swarm nodes
#   hosts:
#     - remus
#     - romulus
#   tags: docker-nodes
#   tasks:
#     - name: Fetch docker swarm information
#       delegate_to: jupiter
#       docker_swarm_info:
#       register: _swarm_info
#
#     - name: Join workers to swarm
#       docker_swarm:
#         state: join
#         remote_addrs: ["jupiter.svr.local"]
#         join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
#         advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# # docker plugin install --alias glusterfs trajano/glusterfs-volume-plugin:v2.0.3 --grant-all-permissions --disable
# # docker plugin set glusterfs SERVERS=jupiter.svr.local,remus.svr.local,romulus.svr.local
# # docker plugin enable glusterfs
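The trailing comments record manual docker CLI steps; a rough sketch of how they could become tasks once this playbook is filled in, using the command module with the arguments taken verbatim from the notes (not idempotent as written — the install will fail on a re-run, so a guard or a dedicated plugin module would be needed):

# hypothetical translation of the manual CLI notes above into tasks
- name: Install glusterfs volume plugin
  become: true
  command: docker plugin install --alias glusterfs trajano/glusterfs-volume-plugin:v2.0.3 --grant-all-permissions --disable

- name: Point the glusterfs plugin at the gluster servers
  become: true
  command: docker plugin set glusterfs SERVERS=jupiter.svr.local,remus.svr.local,romulus.svr.local

- name: Enable the glusterfs plugin
  become: true
  command: docker plugin enable glusterfs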
playbooks/provision.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
# First: meta setup. Check everything is as we expect and that we have a remote
# venv with required dependencies
- import_playbook: initialize.yml

# Second: initial setup. Enforces the system to a "known good" state that we can
# work with
- import_playbook: provision-common.yml

# Third: set up the datastore. Lots of downstream stuff won't work without the
# ability to mount data storage
- import_playbook: provision-datastore.yml

# Finally: set up the docker swarm. Configures the workers, security, web proxy,
# and management system. Once done, applications are ready for deployment
- import_playbook: provision-swarm.yml
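Following the same layering, an eventual deploy-stage entrypoint could chain on top of this file; everything in this sketch, including both file names, is hypothetical:

# playbooks/deploy.yml (hypothetical)
---
# Make sure the platform layers are in place first
- import_playbook: provision.yml

# Then lay individual applications onto the platform
- import_playbook: deploy-someapp.yml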
playbooks/templates (new symbolic link)
@@ -0,0 +1 @@
../resources
@@ -1,57 +0,0 @@
---
# - hosts: vm-host-plex.net.enp.one
#   #gather_facts: false
#   tasks:
#     - name: Query plex API (shhh) to load latest releases
#       get_url:
#         url: https://plex.tv/api/downloads/5.json
#         dest: "{{ plex_releases_file | default('/tmp/plexreleases.json') }}"

- hosts: plex
  name: Update Plex Media Server to latest version
  vars:
    plex_releases: "{{ lookup('url', 'https://plex.tv/api/downloads/5.json') | from_json }}"
  tasks:
    - name: Identify the proper release file
      when: >-
        (ansible_os_family | lower == item["distro"])
        and (ansible_distribution | lower in item["label"] | lower)
        and (ansible_userspace_bits in item["label"])
      set_fact:
        plex_release_url: "{{ item.url }}"
        plex_release_checksum: "{{ item.checksum }}"
      loop: "{{ plex_releases['computer']['Linux']['releases'] }}"

    - name: Download package
      get_url:
        url: "{{ plex_release_url }}"
        checksum: sha1:{{ plex_release_checksum }}
        dest: /tmp/plexmediaserver-{{ plex_release_checksum }}.{{ plex_release_url.split(".")[-1] }}

    - name: Stop the PMS service
      become: true
      systemd:
        name: "{{ plex_service | default('plexmediaserver') }}"
        state: stopped

    - name: Install update package
      become: true
      block:
        - name: Install update package using DNF
          when: ansible_distribution == "Fedora"
          dnf:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
            state: latest
        - name: Install update package using YUM
          when: ansible_distribution == "CentOS"
          yum:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
            state: latest
        - name: Install update package using APT
          when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
          apt:
            name: /tmp/plexmediaserver-{{ plex_release_checksum }}.deb
            state: latest

    - name: Start the PMS service
      become: true
      systemd:
        name: "{{ plex_service | default('plexmediaserver') }}"
        state: started
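For reference, the selection loop above reads four fields from each entry of plex_releases['computer']['Linux']['releases']; an illustrative item, with every value made up:

# shape of one releases-list element, inferred from the loop above
- distro: redhat              # matched against ansible_os_family | lower
  label: CentOS 64-bit        # must contain the distribution name and ansible_userspace_bits ("64")
  url: https://downloads.plex.tv/plexmediaserver-1.2.3.4.rpm
  checksum: 0123456789abcdef0123456789abcdef01234567    # sha1 consumed by get_url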