Compare commits

...

46 Commits

Author SHA1 Message Date
1e1d4d75a0 Update repository meta data
Update dependencies
Add precommit config
Add readme notes
2020-12-04 15:00:20 -05:00
5a48dc5a61 Populate device parameters and settings in inventory 2020-12-04 14:55:02 -05:00
0a7b67b6c5 Overhaul playbook organizational structure
provision playbooks now establish platform-related components of the macro system
configure playbooks now configure/update/establish specific subcomponents of systems
deploy playbooks will eventually deploy specific applications onto the platform
2020-12-04 14:52:49 -05:00
39b2e4676e Update vars files with better organization and usage separation 2020-12-04 14:50:47 -05:00
2bda08fd2f Add resources directory with baseline common file/templates 2020-12-04 14:49:52 -05:00
f1639dce1e Overhaul reuse structure from role to task orientation
The overall config this will end up with is going to be nowhere
near complicated enough to require the segmented structure of roles.
A single directory of reusable tasks and resources will be much better
2020-12-04 14:47:33 -05:00
5df550669a Remove legacy content from remote 2020-12-04 14:47:03 -05:00
38ce173ad5 Add new users to minecraft 2020-04-10 16:25:34 -04:00
088123b3ce Update firewall config for docker usage 2020-03-22 21:21:39 -04:00
9970e17d2f Remove meta import from all secondary config playbooks 2020-03-22 21:21:26 -04:00
d15779f99a Add deployment playbook for interim plex server 2020-03-22 21:20:57 -04:00
2a9f5fb965 Add deployment playbook for interim minecraft server 2020-03-22 21:20:31 -04:00
448e2e4423 Add docker config variables to inventory 2020-03-21 13:29:38 -04:00
0b214f734c Move common env role to configure-env playbook 2020-03-21 13:28:49 -04:00
fd2989cd66 Update configuration playbooks 2020-03-21 13:26:29 -04:00
01c882d585 Bootstrap remote venv for running ansible from
God pip is just the absolute goddamn worst holy shit
It took me like two hours to dig myself out of the compatibility problems
trying to install docker bindings to the system python gave me. This will
teach me to never install anything to the system python ever again. God I
hate pip
2020-03-21 13:19:32 -04:00
306eda9c3c Fix docker installation process on cent8 2020-03-21 13:16:43 -04:00
09892ddc3d Add docker-ce to package update exclusion list 2020-03-21 13:15:52 -04:00
6a825e1dd7 Fix permissions on networkd files 2020-03-21 13:14:42 -04:00
bef40c64c6 Add docker dependencies to pyproject 2020-03-21 13:11:42 -04:00
cadb79cd26 Add fix permissions on authorized key files 2020-03-21 13:10:08 -04:00
58431d1d78 Add reboot to kernel installation tasks 2020-03-21 13:09:14 -04:00
dc1395daf1 Refactor roles to support new variable schema
Add common-env and docker roles
2020-03-17 22:51:10 -04:00
1f3ca79d04 Add configuration playbooks for setting up various system components 2020-03-17 22:50:54 -04:00
33ef563375 Add hosts remus, romulus, and router 2020-03-17 22:50:37 -04:00
2e34cf9c85 Add tasks for installing kernel plus on centos8 2020-03-17 22:50:18 -04:00
5c1d5a3a5c Setup configure-auth playbook to deploy authentication parameters 2020-03-17 22:49:57 -04:00
6544f30114 Update sshkey deployment to work with new variable system 2020-03-17 22:49:34 -04:00
182cdb20ae Add interpreter check to meta playbook 2020-03-17 22:49:06 -04:00
c59b9f54bb Add preprocessing tasks and auth playbook
Update secrets submodule
2020-02-11 23:26:05 -05:00
bb3578f997 Restructure repository, removing old stuff 2020-02-11 23:17:43 -05:00
2fa6554b9d Update en2 with new variable names 2020-02-11 23:16:58 -05:00
75c709a5a5 Remove unused hosts from inventory until they can be updated 2020-02-11 23:15:58 -05:00
e81fc750cb Convert from pipenv to poetry 2020-02-11 23:15:34 -05:00
27dd062900 Remove failed networkd service on cent 8
¯\_(ツ)_/¯
2019-12-01 14:47:09 -05:00
b98e464f07 Update packages role to better handle cent7 vs 8 2019-12-01 14:46:49 -05:00
d5fdbbb9aa Restructure local user preprocessing tasks 2019-12-01 14:15:21 -05:00
c32fc17550 Add local auth and ssh keyauth deployment playbooks 2019-12-01 14:14:25 -05:00
6b5df945de Update user targets, remove password hashes
Update secrets submodule
2019-11-23 21:43:39 -05:00
23a6dfcc62 Restructure inventory to yaml format, consolidate hostvars 2019-11-23 21:40:17 -05:00
5cedb22b51 Cleanup init playbook and unused roles 2019-11-23 21:05:01 -05:00
3dc96bbf4d Adapt dependency playbook to work with cent 7+8
Fix bug in bashrc ps1
2019-11-23 21:04:29 -05:00
4ae14c54dc Add secrets submodule so we can be secure and stuff 2019-11-23 20:26:59 -05:00
b3f9e8fc80 Debug and fix networkd role to work with cent8 2019-11-23 19:18:40 -05:00
8f839a4944 Make username bold in PS1 2019-11-23 19:18:08 -05:00
62b95d4e22 Trying a whole buncha stuff 2019-11-23 14:30:39 -05:00
113 changed files with 2786 additions and 1601 deletions

8
.gitignore vendored
View File

@@ -1,3 +1,7 @@
# ---> Ansible
*.retry
*.swp
playbooks/testing.yml
.mypy_cache/*
*.idea
**/__pycache__/
.venv/

3
.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "vars/secrets"]
path = vars/secrets
url = git@vcs.enp.one:omni/omni-ansible-secrets.git
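
The secrets live in a private submodule, so a fresh clone needs it initialized explicitly. A minimal sketch, assuming you have access to the private `vcs.enp.one` remote:

```bash
# Clone the playbook repo together with the vars/secrets submodule
git clone --recurse-submodules https://vcs.enp.one/omni/omni-ansible.git
cd omni-ansible/

# Or, if the repo was already cloned without submodules:
git submodule update --init vars/secrets
```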

28
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,28 @@
---
# All of the pre-commit hooks here actually use the `python` pre-commit language
# setting. However, for the python language setting, pre-commit will create and manage
# a cached virtual environment for each hook ID and do a bare `pip install <repo>` into
# the venv to set up the hook. This can result in conflicting dependency versions between
# the version installed to the pre-commit venv and the version installed to the Poetry
# venv specified in the lockfile.
#
# The solution is to specify `language: system` for all hooks and then install the
# required dependencies to the Poetry venv. The `system` language skips the isolated
# venv creation and looks for the entrypoint specified by the hook in the global
# environment which, if running in the Poetry venv, will find the entrypoint provided
# by the Poetry-managed dependency.
#
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.3.0
hooks:
- id: end-of-file-fixer
language: system
- id: fix-encoding-pragma
args:
- "--remove"
language: system
- id: trailing-whitespace
language: system
- id: check-merge-conflict
language: system
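
Because every hook uses `language: system`, the hook entry points have to be resolvable from the Poetry venv described in the comment above. A minimal sketch of the intended workflow, assuming `pre-commit` and `pre-commit-hooks` are dev dependencies in `pyproject.toml` (they are listed there later in this diff):

```bash
# Install the project venv, including pre-commit and pre-commit-hooks as dev deps
poetry install

# Register the git hook and run everything once from inside the Poetry venv, so the
# `language: system` hooks find their entry points in that environment
poetry run pre-commit install
poetry run pre-commit run --all-files
```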

13
Pipfile
View File

@@ -1,13 +0,0 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
[packages]
ansible = "*"
paramiko = "*"
[requires]
python_version = "3.7"

213
Pipfile.lock generated
View File

@@ -1,213 +0,0 @@
{
"_meta": {
"hash": {
"sha256": "d6313730a0cb1941be53cf8e8e42bf51eb078f1edccefa3fc31fe38f7e36fcd2"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.7"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {
"ansible": {
"hashes": [
"sha256:a0153e2de3619b7e307df179cd91a3c3804cf1fe048273fe4ea5238b76679ff1"
],
"index": "pypi",
"version": "==2.8.4"
},
"asn1crypto": {
"hashes": [
"sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
"sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49"
],
"version": "==0.24.0"
},
"bcrypt": {
"hashes": [
"sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89",
"sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42",
"sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294",
"sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161",
"sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31",
"sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5",
"sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c",
"sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0",
"sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de",
"sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e",
"sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052",
"sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09",
"sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105",
"sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133",
"sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7",
"sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc"
],
"version": "==3.1.7"
},
"cffi": {
"hashes": [
"sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774",
"sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d",
"sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90",
"sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b",
"sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63",
"sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45",
"sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25",
"sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3",
"sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b",
"sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647",
"sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016",
"sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4",
"sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb",
"sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753",
"sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7",
"sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9",
"sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f",
"sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8",
"sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f",
"sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc",
"sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42",
"sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3",
"sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909",
"sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45",
"sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d",
"sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512",
"sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff",
"sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201"
],
"version": "==1.12.3"
},
"cryptography": {
"hashes": [
"sha256:24b61e5fcb506424d3ec4e18bca995833839bf13c59fc43e530e488f28d46b8c",
"sha256:25dd1581a183e9e7a806fe0543f485103232f940fcfc301db65e630512cce643",
"sha256:3452bba7c21c69f2df772762be0066c7ed5dc65df494a1d53a58b683a83e1216",
"sha256:41a0be220dd1ed9e998f5891948306eb8c812b512dc398e5a01846d855050799",
"sha256:5751d8a11b956fbfa314f6553d186b94aa70fdb03d8a4d4f1c82dcacf0cbe28a",
"sha256:5f61c7d749048fa6e3322258b4263463bfccefecb0dd731b6561cb617a1d9bb9",
"sha256:72e24c521fa2106f19623a3851e9f89ddfdeb9ac63871c7643790f872a305dfc",
"sha256:7b97ae6ef5cba2e3bb14256625423413d5ce8d1abb91d4f29b6d1a081da765f8",
"sha256:961e886d8a3590fd2c723cf07be14e2a91cf53c25f02435c04d39e90780e3b53",
"sha256:96d8473848e984184b6728e2c9d391482008646276c3ff084a1bd89e15ff53a1",
"sha256:ae536da50c7ad1e002c3eee101871d93abdc90d9c5f651818450a0d3af718609",
"sha256:b0db0cecf396033abb4a93c95d1602f268b3a68bb0a9cc06a7cff587bb9a7292",
"sha256:cfee9164954c186b191b91d4193989ca994703b2fff406f71cf454a2d3c7327e",
"sha256:e6347742ac8f35ded4a46ff835c60e68c22a536a8ae5c4422966d06946b6d4c6",
"sha256:f27d93f0139a3c056172ebb5d4f9056e770fdf0206c2f422ff2ebbad142e09ed",
"sha256:f57b76e46a58b63d1c6375017f4564a28f19a5ca912691fd2e4261b3414b618d"
],
"version": "==2.7"
},
"jinja2": {
"hashes": [
"sha256:065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013",
"sha256:14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b"
],
"version": "==2.10.1"
},
"markupsafe": {
"hashes": [
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"
],
"version": "==1.1.1"
},
"paramiko": {
"hashes": [
"sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf",
"sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041"
],
"index": "pypi",
"version": "==2.6.0"
},
"pycparser": {
"hashes": [
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
],
"version": "==2.19"
},
"pynacl": {
"hashes": [
"sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255",
"sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c",
"sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e",
"sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae",
"sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621",
"sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56",
"sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39",
"sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310",
"sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1",
"sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a",
"sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786",
"sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b",
"sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b",
"sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f",
"sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20",
"sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415",
"sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715",
"sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1",
"sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0"
],
"version": "==1.3.0"
},
"pyyaml": {
"hashes": [
"sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9",
"sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4",
"sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8",
"sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696",
"sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34",
"sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9",
"sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73",
"sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299",
"sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b",
"sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae",
"sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681",
"sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41",
"sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8"
],
"version": "==5.1.2"
},
"six": {
"hashes": [
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
],
"version": "==1.12.0"
}
},
"develop": {}
}

View File

@@ -1,3 +1,41 @@
# omni-ansible
Network Ansible configurations
* The `omni_*` prefix is used for custom variables defined and used internally to
distinguish them from `ansible_*` or other variables. The `_runtime_` prefix should
be used for runtime variables
* Passing `clean=true` should force cleaning any and all cached stuff
* Passing `update=true` should update any unpinned _things_ to their latest version
Organizational notes:
* Playbooks should be platform/device agnostic. Any playbook should be runnable against
any device. If the config a playbook deploys isn't applicable to that device then the
playbook should be laid out so that it skips any inapplicable hosts.
* Building from that, platform-conditionals should go in task files: `when` conditions
in playbooks should be limited to runtime conditions.
Target notes:
* The `'mgmt'` target grants remote management access. This usually means SSH + local
login access, but can also mean web interface (cockpit, erx, etc)
General workflow:
1. Run `provision.yml` - this gets the entire environment into a ready-to-go state but
does not deploy any actual applications or perform any target tasks
2. Run one or more `deploy-*.yml` - this deploys the named application onto the platform
3. Run one or more `do-*.yml` - this performs one-off tasks (see the example invocation below)
## local env creation
Requires Poetry-1.1+
```bash
git clone https://vcs.enp.one/omni/omni-ansible.git
cd omni-ansible/
poetry install
```
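
A hypothetical end-to-end invocation tying the workflow above to the EN1 inventory, assuming the playbooks live under `playbooks/`; the playbook names after `provision.yml` are illustrative only, and the `--extra-vars` flags map onto the `clean`/`update` runtime variables described earlier:

```bash
# Provision the whole environment, forcing cache cleanup and unpinned updates
poetry run ansible-playbook --inventory en1.yml playbooks/provision.yml \
    --extra-vars "clean=true update=true"

# Then layer applications and one-off tasks on top (names are examples only)
poetry run ansible-playbook --inventory en1.yml playbooks/deploy-swarm.yml
poetry run ansible-playbook --inventory en1.yml playbooks/do-something.yml
```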

60
en1.yml Normal file
View File

@@ -0,0 +1,60 @@
---
all:
vars:
ansible_user: ansible
ansible_python_interpreter: /opt/ansible/bin/python
omni_ansible_venv: /opt/ansible
update: false
clean: false
omni_host_swarm_controller: jupiter
omni_host_webproxy: jupiter
children:
servers:
children:
virtualization: {}
virtualization:
vars:
omni_local_hosts:
- hostname: jupiter.svr.local
ip: 192.168.42.10
- hostname: remus.svr.local
ip: 192.168.42.20
- hostname: romulus.svr.local
ip: 192.168.42.30
hosts:
jupiter:
ansible_host: jupiter.net.enp.one
omni_description: EN1 System Control Server
omni_docker_swarm_iface: eno2
omni_networking:
eno1:
dhcp: true
dhcp_address: 10.42.101.10/42
eno2:
dhcp: false
addresses: ["192.168.42.10/24"]
children:
worker:
hosts:
remus:
ansible_host: remus.net.enp.one
omni_description: EN1 Hypervisor/Datastore
omni_networking:
eno1:
dhcp: true
dhcp_address: 10.42.101.20/24
eno2:
dhcp: false
addresses: ["192.168.42.20/24"]
romulus:
ansible_host: romulus.net.enp.one
omni_description: EN1 Hypervisor/Datastore
omni_networking:
eno1:
dhcp: true
dhcp_address: 10.42.101.30/24
eno2:
dhcp: false
addresses: ["192.168.42.30/24"]
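
A quick way to sanity-check this inventory is to enumerate and ping the groups it defines; a sketch, overriding the interpreter since the `/opt/ansible` venv only exists after `initialize.yml` has run:

```bash
# Show the group/host tree defined by en1.yml
ansible-inventory --inventory en1.yml --graph

# Ping the hypervisor group using the system interpreter instead of the bootstrap venv
ansible virtualization --inventory en1.yml --module-name ping \
    --extra-vars ansible_python_interpreter=/usr/bin/python3
```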

14
en2.yml Normal file
View File

@@ -0,0 +1,14 @@
---
all:
vars:
ansible_user: ansible
omni_protected_users: ["root", "ansible"]
ungrouped:
hosts:
nimbus-1:
ansible_host: en2.enp.one
omni_description: EN2 Digital Ocean Cloud Server
omni_os:
name: centos
version: "7"
omni_targets: ["admin", "network"]

View File

@@ -1,8 +0,0 @@
---
ansible_user: ansible
disable_gnome_user_list: True
protected_users:
- root
- ansible

View File

@@ -1,12 +0,0 @@
---
enable_gui: False
enable_ssh: True
enable_ssh_password_auth: False
disable_sudo_password: True
enable_networkd: True
generate_keys: False

View File

@@ -1,12 +0,0 @@
---
enable_gui: False
enable_ssh: True
enable_ssh_password_auth: False
disable_sudo_password: False
enable_networkd: True
generate_keys: False

View File

@@ -1,12 +0,0 @@
---
enable_gui: False
enable_ssh: True
enable_ssh_password_auth: False
disable_sudo_password: True
enable_networkd: True
generate_keys: False

View File

@@ -1,12 +0,0 @@
---
enable_gui: True
enable_ssh: False
enable_ssh_password_auth: False
disable_sudo_password: False
enable_networkd: False
generate_keys: False

View File

@@ -1,6 +0,0 @@
---
description: "EN1 Reverse Proxy / EN1 VPN Server"
targets:
- admin
- vpn

View File

@@ -1,8 +0,0 @@
---
description: "Wandering excursion"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@@ -1,6 +0,0 @@
---
description: "EN2 Digital Ocean Cloud Server"
targets:
- admin
- web

View File

@@ -1,5 +0,0 @@
---
description: "EN1 Secondary Datastore"
targets:
- admin
- datastore

View File

@@ -1,6 +0,0 @@
---
description: "And the Last"
targets:
- admin
- workstations

View File

@@ -1,9 +0,0 @@
---
description: "EN1 Primary Datastore / EN1 Secondary Hypervisor"
targets:
- admin
- datastore
networking:
ovirtmgt:

View File

@@ -1,10 +0,0 @@
---
description: "EN1 Primary Hypervisor"
targets:
- admin
- datastore
- hypervisor
networking:
ovirtmgt:

View File

@@ -1,33 +0,0 @@
---
description: EN1 Core Router
ansible_network_os: edgeos
targets:
- admin
- network
network:
ethernet_eth0:
address: dhcp
description: UPLINK
extra:
- duplex auto
- speed auto
ethernet_eth1:
address: 10.42.100.1/24
description: PUBLIC
extra:
- duplex auto
- speed auto
ethernet_eth2:
address: 10.42.101.1/24
description: PRIVATE
extra:
- duplex auto
- speed auto
ethernet_eth2_vif_10:
address: 10.42.102.1/24
description: SECURE
extra:
- mtu 1500

View File

@@ -1,8 +0,0 @@
---
description: "Smooth as Silk"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@@ -1,8 +0,0 @@
---
description: "Watcher who Watches the Watchmen"
targets:
- admin
- workstations
ansible_python_interpreter: /usr/bin/python3

View File

@@ -1,5 +0,0 @@
---
description: "Database Host: MariaDB"
targets:
- admin

View File

@@ -1,5 +0,0 @@
---
description: "Database Host: MySQL"
targets:
- admin

View File

@@ -1,5 +0,0 @@
---
description: "Database Host: PrometheusDB"
targets:
- admin

View File

@@ -1,6 +0,0 @@
---
description: "Development Host: Nginx Web Server"
targets:
- admin
- web

View File

@@ -1,9 +0,0 @@
---
description: "Application Host: Bitwarden"
targets:
- admin
- bitwarden
networking:
eth0:

View File

@@ -1,9 +0,0 @@
---
description: "Application Host: Gitea"
targets:
- admin
- gitea
networking:
eth0:

View File

@@ -1,9 +0,0 @@
---
description: "Application Host: Minecraft"
targets:
- admin
- minecraft
networking:
eth0:

View File

@@ -1,9 +0,0 @@
---
description: "Application Host: Nextcloud"
targets:
- admin
- nextcloud
networking:
eth0:

View File

@@ -1,6 +0,0 @@
---
description: "Application Host: Plex Media Server"
targets:
- admin
- plex

View File

@@ -1,25 +0,0 @@
[network]
router.net.enp.one
[servers]
romulus.net.enp.one
remus.net.enp.one
novis.tre2.local
[vms]
vm-db-mysql.net.enp.one
vm-dev-nginx.net.enp.one
vm-host-gitea.net.enp.one
vm-host-plex.net.enp.one
vm-host-bitwarden.net.enp.one
vm-host-nextcloud.net.enp.one
vm-host-minecraft.net.enp.one
[cloud]
nimbus-1.net.enp.one
[workstations]
omega
vigil-nox.tre2.local
serico-nox.tre2.local
inerro.tre2.local

View File

View File

@@ -0,0 +1,89 @@
---
- name: Configure environment
hosts: all
tasks:
- name: Set hostname
become: true
hostname:
name: "{{ ansible_host }}"
- import_tasks: tasks/sshd/banner.yml
- name: Install global bash components
become: true
copy:
src: bash/{{ item }}.sh
dest: /etc/profile.d/Z-{{ 10 + loop_index }}-enpn-{{ item }}.sh
mode: 0644
loop:
- global
- pyenv
- aliases
- helpers
loop_control:
index_var: loop_index
label: "{{ item }}"
- name: Disable dynamic MOTD
become: true
replace:
path: /etc/pam.d/sshd
regexp: "^session\\s+optional\\s+pam_motd\\.so.*$"
replace: "#session optional pam_motd.so"
- name: Remove legacy global bashrc
become: true
file:
path: /etc/profile.d/ZA-enpn-bashrc.sh
state: absent
- name: Disable case-sensitive autocomplete
become: true
lineinfile:
path: /etc/inputrc
line: set completion-ignore-case on
create: true
mode: 0644
- name: Configure additional security settings on shared servers
hosts: servers
tasks:
- name: Identify local home directories
become: true
find:
file_type: directory
path: /home/
recurse: false
register: _local_home_dirs
- name: Determine files to write-protect
set_fact:
_secure_files: >-
{{ _secure_files | default([]) + [
item.path ~ '/.bashrc',
item.path ~ '/.bash_profile',
item.path ~ '/.ssh/authorized_keys',
item.path ~ '/.ssh/config'
] }}
loop: "{{ _local_home_dirs.files }}"
loop_control:
label: "{{ item.path }}"
- name: Fetch status of secure files
become: true
stat:
path: "{{ item }}"
loop: "{{ _secure_files }}"
loop_control:
label: "{{ item }}"
register: _secure_file_stats
- name: Restrict access to secure files
become: true
file:
path: "{{ item.item }}"
state: "{{ 'file' if item.stat.exists else 'touch' }}"
mode: 0400
loop: "{{ _secure_file_stats.results }}"
loop_control:
label: "Write-protecting: {{ item.item }}"
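
The write-protect sequence in the second play (find the local home directories, build the file list, stat each file, then lock it down) is roughly equivalent to the shell loop below; a sketch for illustration only, assuming each home directory already contains a `.ssh/` directory:

```bash
# Lock down the shell startup and SSH files in every local home directory,
# mirroring the find -> set_fact -> stat -> file(mode=0400) chain above
for home in /home/*/; do
    for f in .bashrc .bash_profile .ssh/authorized_keys .ssh/config; do
        sudo touch "${home}${f}"        # create the file if it does not exist yet
        sudo chmod 0400 "${home}${f}"   # owner read-only, like mode: 0400
    done
done
```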

View File

@@ -0,0 +1,164 @@
---
- name: Configure server management services
hosts: servers
tasks:
- import_tasks: tasks/sshd/secure.yml
- name: Enable cockpit
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
systemd:
name: cockpit.socket
enabled: true
state: started
- name: Configure virtualization management services
hosts: virtualization
tasks:
- name: Create docker group
become: true
group:
name: docker
state: present
- name: Configure local accounts
hosts: all
vars_files:
- vars/accounts.yml
- vars/secrets/passwords.yml
- vars/sshkeys.yml
tasks:
- name: Create omni group
become: true
group:
name: "{{ omni_group.name }}"
gid: "{{ omni_group.gid }}"
state: present
- name: Determine existing omni users
changed_when: false
shell:
cmd: 'grep omni /etc/group | cut --delimiter : --fields 4 | tr "," "\n"'
register: _existing_omni_users
- name: Delete removed user accounts
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
user:
name: "{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Delete removed user groups
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
group:
name: "{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Delete removed user home directories
become: true
when: item not in (omni_users | items2dict(key_name='name', value_name='uid'))
file:
path: "/home/{{ item }}"
state: absent
loop: "{{ _existing_omni_users.stdout_lines }}"
- name: Create account groups
become: true
group:
name: "{{ item.name }}"
gid: "{{ item.uid }}"
state: present
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Create accounts
become: true
user:
name: "{{ item.name }}"
state: present
uid: "{{ item.uid }}"
group: "{{ item.name }}"
groups: >-
{{
[omni_group.name] +
(['wheel' if ansible_os_family | lower == 'redhat' else 'sudo'] if item.admin | default(false) else []) +
(['docker' if 'virtualization' in group_names else omni_group.name] if item.admin | default(false) else [])
}}
# The 'else omni_group.name' above is just some non-breaking value to cover the
# false condition, it doesn't have special meaning
comment: "{{ item.fullname | default('') }}"
shell: "{{ '/bin/bash' if 'mgmt' in item.targets else '/bin/false' }}"
system: "{{ item.svc | default(false) }}"
generate_ssh_key: false
password: "{{ omni_users_secrets[item.name] | default(none) }}"
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Disable sudo password for ansible
become: true
lineinfile:
create: true
path: /etc/sudoers.d/30-ansible
line: "ansible ALL=(ALL) NOPASSWD:ALL"
mode: 0644
- name: Ensure proper ownership of user home directories
become: true
file:
path: /home/{{ item.name }}
state: directory
group: "{{ item.name }}"
owner: "{{ item.name }}"
mode: 0700
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce root password
become: true
user:
name: root
password: "{{ omni_users_secrets.root }}"
state: present
- name: Create SSH directory
become: true
file:
path: /home/{{ item.name }}/.ssh
owner: "{{ item.name }}"
group: "{{ item.name }}"
state: directory
mode: 0755
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Update authorized keys
become: true
when: "'mgmt' in item.targets"
authorized_key:
user: "{{ item.name }}"
key: "{{ omni_ssh_keys[item.name] | join('\n') }}"
state: present
exclusive: true
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"
- name: Enforce ownership of authorized keys
become: true
when: "'mgmt' in item.targets"
file:
path: /home/{{ item.name }}/.ssh/authorized_keys
state: file
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: 0400
loop: "{{ omni_users }}"
loop_control:
label: "{{ item.uid }},{{ item.name }}"

View File

@@ -0,0 +1,34 @@
---
# - name: Configure router
# hosts: router
# gather_facts: false
# pre_tasks:
# - name: Collect EdgeOS facts
# edgeos_facts:
# gather_subset: "!config"
# tasks:
# - name: Configure interfaces
# edgeos_config:
# lines:
# - set interfaces ethernet eth0 address dhcp
# - set interfaces ethernet eth0 description EXTERNAL
# - set interfaces ethernet eth1 address 10.42.100.1/24
# - set interfaces ethernet eth1 address 10.42.99.1/24
# - set interfaces ethernet eth1 description LOCAL
# - set interfaces ethernet eth2 address 10.42.101.1/24
# - set interfaces ethernet eth2 description DOMAIN
- name: Configure server networking
hosts: servers
tasks:
- import_tasks: tasks/networkd/install.yml
- import_tasks: tasks/networkd/configure.yml
- import_tasks: tasks/networkd/services.yml
- name: Configure local hostsfile
become: true
lineinfile:
path: /etc/hosts
state: present
line: "{{ item.ip }} {{ item.hostname }}"
loop: "{{ omni_local_hosts | default([]) }}"

View File

@@ -0,0 +1,37 @@
---
# TBW
# - name: Install Nginx
# hosts: jupiter
# handlers:
# - name: restart_nginx
# become: true
# systemd:
# name: nginx
# state: restarted
# tasks:
# - name: Install nginx and certbot
# become: true
# dnf:
# name:
# - nginx
# - certbot
# - python3-certbot-nginx
# state: present
#
# - name: Enable and start nginx
# become: true
# systemd:
# name: nginx
# state: started
# enabled: true
#
# - name: Install configuration
# become: true
# copy:
# src: nginx.conf
# dest: /etc/nginx/nginx.conf
# notify:
# - restart_nginx
#
# # sudo setsebool -P httpd_can_network_connect on

View File

@@ -1,9 +0,0 @@
---
- hosts: all
name: Ansible python bindings
tags: always
tasks:
- import_tasks: tasks/centos/bindings.yml
when: ansible_distribution == "CentOS"
- import_tasks: tasks/fedora/bindings.yml
when: ansible_distribution == "Fedora"

View File

@@ -1,67 +0,0 @@
---
- hosts: nimbus-1.net.enp.one
name: Deploy documentation
vars:
# Local directory to use for cloning and building the documentation site
DIR_BUILD: /tmp/docs
# Remote directory to install the site at
DIR_DEPLOY: /usr/share/nginx/doc.enp.one/html
tasks:
- name: Build the static site locally
delegate_to: 127.0.0.1
block:
- name: Ensure the build directory does not exist
file:
path: "{{ DIR_BUILD }}"
state: absent
- name: Clone documentation repository
git:
repo: git@vcs.enp.one:omni/omni-docs.git
dest: "{{ DIR_BUILD }}/"
- name: Generate build env requirements file
# Generate the requirements.txt style format, pipe through grep to remove
# the index line (not sure why thats included at all tbh) and save the
# result in "requirements.txt" to usage with pip
shell: pipenv lock --requirements | grep --invert-match "\-i">requirements.txt
args:
chdir: "{{ DIR_BUILD }}/"
- name: Create build env and install requirements
pip:
requirements: "{{ DIR_BUILD }}/requirements.txt"
virtualenv: "{{ DIR_BUILD }}/venv"
virtualenv_python: python3
state: present
- name: Build the static site using mkdocs
shell: "{{ DIR_BUILD }}/venv/bin/mkdocs build"
args:
chdir: "{{ DIR_BUILD }}"
- name: Upload static site to remote
copy:
src: "{{ DIR_BUILD }}/site/"
dest: "/tmp/docs/"
- name: Remove legacy site
become: true
file:
path: "{{ DIR_DEPLOY }}"
state: absent
- name: Copy static site to deployment directory
become: true
copy:
src: "/tmp/docs/"
dest: "{{ DIR_DEPLOY }}"
remote_src: true
owner: root
group: nginx
mode: 0755
setype: httpd_sys_content_t
- name: Clean up local build directory
delegate_to: 127.0.0.1
file:
path: "{{ DIR_BUILD }}"
state: absent
- name: Clean up remote temp directory
file:
path: /tmp/docs
state: absent

View File

@@ -1,32 +0,0 @@
---
- hosts: all
name: Update ssh keys on all devices
tasks:
- import_tasks: tasks/users-preprocessing.yml
- name: Install public keys
tags: users_keys
become: true
block:
- name: Ensure SSH directory exists
file:
state: directory
path: /home/{{ item.name }}/.ssh
loop: "{{ local_users | difference([None]) }}"
- name: Put keys on remote
when: item.keys != []
authorized_key:
user: "{{ item.name }}"
key: "{{ item.sshkeys | join('\n') }}"
state: present
exclusive: yes
loop: "{{ local_users | difference([None]) }}"
- hosts: all
name: Disable SSH password authentication
tags:
- always
tasks:
- import_tasks: tasks/sshd/disable-password-auth.yml
when: enable_ssh_password_auth|bool == false

1
playbooks/files Symbolic link
View File

@@ -0,0 +1 @@
../resources

View File

@@ -1,16 +0,0 @@
function up() { cd $(eval printf '../'%.0s {1..$1}); }
alias fuck='sudo $(history -p \!\!)'
alias doc='cd ~/Documents'
alias explorer='nautilus'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias ipconfig='ip address show'
alias cls='clear'
alias mklink='ln -s'
alias ls='ls -lshF --color --group-directories-first --time-style=long-iso'
alias gg='cd ~/Git'
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1="\[\e[0;97m\]\[\e[37m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]$(parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "

128
playbooks/initialize.yml Normal file
View File

@@ -0,0 +1,128 @@
---
- name: Bootstrap remote ansible environment
hosts: all
tags:
- always
vars:
# Set this fact to allow the bootstrap play to run using the native system python
# interpreter. A variable defined here is only in scope while this specific play
# is being run; once this play is done this value is dropped and the default value
# (which is actually set in the inventory file to the interpreter created by this
# play) will be used.
ansible_python_interpreter: /usr/bin/python3
tasks:
- name: Determine runtime settings
set_fact:
_runtime_clean: "{{ true if (clean | bool) else false }}"
_runtime_update: "{{ true if (update | bool) else false }}"
_runtime_update_state: "{{ 'latest' if (update | bool) else 'present' }}"
- name: Clean bootstrap virtualenv
when: _runtime_clean
become: true
file:
path: "{{ omni_ansible_venv }}"
state: absent
- name: Create bootstrap virtualenv directory
become: true
file:
path: "{{ omni_ansible_venv }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0755
- name: Create bootstrap virtualenv
command:
cmd: "{{ ansible_python_interpreter }} -m venv {{ omni_ansible_venv }} --system-site-packages"
creates: "{{ omni_ansible_venv }}/bin/python"
# - name: Assign ownership of the virtualenv to ansible
# become: true
# file:
# path: "{{ omni_ansible_venv }}"
# state: directory
# owner: "{{ ansible_user }}"
# group: "{{ ansible_user }}"
# mode: 0755
# follow: false
- name: Generate remote requirements file locally
delegate_to: 127.0.0.1
command:
cmd: poetry export --format requirements.txt
changed_when: false
register: _poetry_requirements
- name: Copy remote requirements file
blockinfile:
path: "{{ omni_ansible_venv }}/req.txt"
create: true
block: "{{ _poetry_requirements.stdout_lines | join('\n') }}"
mode: 0644
- name: Install remote requirements
pip:
executable: "{{ omni_ansible_venv }}/bin/pip"
requirements: "{{ omni_ansible_venv }}/req.txt"
state: present
- name: Install CentOS 8 python bindings
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name:
- python3-libselinux
- python3-policycoreutils
- python3-firewall
- name: Install CentOS 7 python bindings
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
become: true
yum:
state: "{{ _runtime_update_state }}"
name:
- libselinux-python
- policycoreutils-python
- python-firewall
- name: Install Fedora python bindings
when: ansible_distribution == "Fedora"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name:
- libselinux-python
- policycoreutils-python
- python3-firewall
- name: Check meta environment
hosts: all
tags:
- always
tasks:
- name: Check required operating system
when: omni_os is defined
assert:
that:
- omni_os.name == ansible_distribution | lower
- omni_os.version_major == ansible_distribution_major_version
fail_msg: >-
Remote is running OS '{{ ansible_distribution }} {{ ansible_distribution_major_version }}',
expected '{{ omni_os.name }} {{ omni_os.version_major }}'
success_msg: >-
Remote is running expected OS '{{ ansible_distribution }}
{{ ansible_distribution_major_version }}'
- name: Check required interpreter settings
assert:
that:
- ansible_python_interpreter.startswith(omni_ansible_venv) is true
fail_msg: >-
Interpreter '{{ ansible_python_interpreter }}'
is not in the expected venv '{{ omni_ansible_venv }}'
success_msg: Interpreter '{{ ansible_python_interpreter }}' is in the expected venv
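
The bootstrap play above essentially automates the following manual steps; a rough sketch assuming a CentOS 8 remote and the `/opt/ansible` path set as `omni_ansible_venv` in the inventory:

```bash
# On the control machine: export the pinned dependencies from Poetry and copy them over
poetry export --format requirements.txt > req.txt
scp req.txt ansible@jupiter.net.enp.one:/tmp/req.txt

# On the remote host: distro bindings first (the venv sees them via --system-site-packages),
# then the venv itself, then the exported requirements
sudo dnf install -y python3-libselinux python3-policycoreutils python3-firewall
sudo install -d -o ansible -g ansible /opt/ansible
/usr/bin/python3 -m venv /opt/ansible --system-site-packages
/opt/ansible/bin/pip install -r /tmp/req.txt
```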

View File

@@ -0,0 +1,29 @@
---
- import_playbook: initialize.yml
- name: Configure system settings
hosts: all
vars_files:
- vars/packages.yml
pre_tasks:
- import_tasks: tasks/centos-8-kernelplus.yml
tasks:
- import_tasks: tasks/packages/clean.yml
when: _runtime_clean is true
- import_tasks: tasks/packages/repos.yml
- import_tasks: tasks/packages/update.yml
when: _runtime_update is true
- import_tasks: tasks/packages/install.yml
- import_playbook: configure-network.yml
- import_playbook: configure-mgmt.yml
- import_playbook: configure-env.yml

View File

@@ -0,0 +1,2 @@
---
# TBW

View File

@@ -0,0 +1,61 @@
---
# TBW
# - import_playbook: provision-common.yml
#
#
# - name: Install and start Docker
# hosts: virtualization
# tasks:
# - import_tasks: tasks/docker/install.yml
#
# - name: Start and enable docker service
# become: true
# systemd:
# name: docker
# state: started
# enabled: yes
#
# - name: Allow swarm traffic through the firewall
# become: true
# firewalld:
# zone: trusted
# interface: "{{ item.key }}"
# permanent: true
# state: enabled
#
#
# - name: Configure swarm master
# hosts: "{{ omni_docker_swarm_manager }}"
# tasks:
# - name: Initialize swarm
# docker_swarm:
# state: present
# advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# - name: Set swarm master to DRAIN
# docker_node:
# hostname: "{{ ansible_host }}"
# availability: drain
#
# - name: Configure swarm nodes
# hosts:
# - remus
# - romulus
# tags: docker-nodes
# tasks:
# - name: Fetch docker swarm information
# delegate_to: jupiter
# docker_swarm_info:
# register: _swarm_info
#
# - name: Join workers to swarm
# docker_swarm:
# state: join
# remote_addrs: ["jupiter.svr.local"]
# join_token: "{{ _swarm_info.swarm_facts.JoinTokens.Worker }}"
# advertise_addr: "{{ omni_docker_swarm_iface }}"
#
# # docker plugin install --alias glusterfs trajano/glusterfs-volume-plugin:v2.0.3 --grant-all-permissions --disable
# # docker plugin set glusterfs SERVERS=jupiter.svr.local,remus.svr.local,romulus.svr.local
# # docker plugin enable glusterfs
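
Until the play above is un-commented, the same swarm can be brought up by hand; a sketch of the docker CLI equivalent, assuming jupiter is the manager and remus/romulus join over the 192.168.42.0/24 addresses from `en1.yml`:

```bash
# On jupiter: initialize the swarm on the cluster interface, drain the manager,
# then print the worker join token (copy it to the workers)
docker swarm init --advertise-addr 192.168.42.10
docker node update --availability drain jupiter
docker swarm join-token --quiet worker

# On remus and romulus: join as workers using the token printed above
docker swarm join --token <WORKER_TOKEN> 192.168.42.10:2377
```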

View File

@@ -1,26 +0,0 @@
---
- hosts: vms
name: Replace NetworkManager with systemd-networkd
tasks:
- name: Install systemd-networkd
when: enable_networkd == true
block:
- import_tasks: tasks/centos/networkd.yml
when: ansible_distribution == "CentOS"
- import_tasks: tasks/fedora/networkd.yml
when: ansible_distribution == "Fedora"
# - import_tasks: common/debian/networkd.yml
# when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
- import_tasks: tasks/networkd/config.yml
- import_tasks: tasks/networkd/services.yml
- hosts: vms
name: Install ovirt agent
tasks:
- name: Install ovirt-agent
become: true
yum:
name: ovirt-guest-agent
state: latest

View File

@@ -1,47 +1,16 @@
---
- import_playbook: dependencies.yml
# First: meta setup. Check everything is as we expect and that we have a remote
# venv with required dependencies
- import_playbook: initialize.yml
# Second: initial setup. Enforces the system to a "known good" state that we can
# work with
- import_playbook: provision-common.yml
- hosts: all
name: Init
tags: initialize
tasks:
- name: Set hostname
become: true
hostname:
name: "{{ default_host if default_host is defined else inventory_hostname }}"
# Third: setup the datastore. Lots of downstream stuff won't work without the ability
# to mount data storage
- import_playbook: provision-datastore.yml
- name: Install global bashrc
become: true
copy:
src: bashrc.sh
dest: /etc/profile.d/global-bashrc.sh
mode: 0644
- import_tasks: tasks/sshd/banner.yml
- hosts: all
name: System packages
tags: initialize
tasks:
- name: Load package variables
include_vars:
file: packages.yml
- import_tasks: tasks/centos/repositories.yml
when: ansible_distribution == "CentOS"
- import_tasks: tasks/centos/packages.yml
when: ansible_distribution == "CentOS"
- import_tasks: tasks/fedora/packages.yml
when: ansible_distribution == "Fedora"
# - import_tasks: tasks/debian/packages.yml
# when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
#- import_playbook: provision-workstation.yml
#- import_playbook: provision-server.yml
#- import_playbook: provision-hypervisor.yml
- import_playbook: provision-virtual-machine.yml
- import_playbook: update.yml
# Finally: setup the docker swarm. Configures the workers, security, web proxy, and
# management system. Once done, applications are ready for deployment
- import_playbook: provison-swarm.yml

1
playbooks/roles Symbolic link
View File

@@ -0,0 +1 @@
../roles

1
playbooks/templates Symbolic link
View File

@@ -0,0 +1 @@
../resources

View File

@@ -1,8 +0,0 @@
//////////// //// //// ///////////
//// ////// //// //// ////
//////// //// /// //// ///////////
//// //// ////// ////
//////////// //// //// {{ description | default('Omni Network System') }}
_______________________________{{ description | default('Omni Network System') | length * '\\' }}\

View File

@@ -1,8 +0,0 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
[Match]
Name={{ item.key }}
[Network]
DHCP=Yes
# EOF

View File

@@ -1,57 +0,0 @@
---
# - hosts: vm-host-plex.net.enp.one
# #gather_facts: false
# tasks:
# - name: Query plex API (shhh) to load latest releases
# get_url:
# url: https://plex.tv/api/downloads/5.json
# dest: "{{ plex_releases_file | default('/tmp/plexreleases.json') }}"
- hosts: vm-host-plex.net.enp.one
name: Update Plex Media Server to latest version
vars:
plex_releases: "{{ lookup('url', 'https://plex.tv/api/downloads/5.json') | from_json }}"
tasks:
- name: Identify the proper release file
when: (ansible_os_family | lower == item["distro"]) and (ansible_distribution | lower in item["label"] | lower) and (ansible_userspace_bits in item["label"])
set_fact:
plex_release_url: "{{ item.url }}"
plex_release_checksum: "{{ item.checksum }}"
loop: "{{ plex_releases['computer']['Linux']['releases'] }}"
- name: Download package
get_url:
url: "{{ plex_release_url }}"
checksum: sha1:{{ plex_release_checksum }}
dest: /tmp/plexmediaserver-{{ plex_release_checksum }}.{{ plex_release_url.split(".")[-1] }}
- name: Stop the PMS service
become: true
systemd:
name: "{{ plex_service | default('plexmediaserver') }}"
state: stopped
- name: Install update package
become: true
block:
- name: Install update package using DNF
when: ansible_distribution == "Fedora"
dnf:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
state: latest
- name: Install update package using YUM
when: ansible_distribution == "CentOS"
yum:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.rpm
state: latest
- name: Install update package using APT
when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
apt:
name: /tmp/plexmediaserver-{{ plex_release_checksum }}.deb
state: latest
- name: Start the PMS service
become: true
systemd:
name: "{{ plex_service | default('plexmediaserver') }}"
state: started

View File

@@ -1,24 +0,0 @@
---
- hosts: all
name: Upgrade packages
tasks:
- name: Upgrade YUM packages
when: ansible_distribution == "CentOS"
become: true
yum:
state: latest
name: "*"
exclude: kernel*{{ ',' + exclude_upgrade | default('') }}
- name: Upgrade DNF packages
when: ansible_distribution == "Fedora"
become: true
dnf:
state: latest
name: "*"
exclude: kernel*{{ ',' + exclude_upgrade | default('') }}
# - name: Upgrade APT packages
# when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
# become: true
# apt:

View File

@@ -1,132 +0,0 @@
---
- import_playbook: dependencies.yml
- hosts: all:!network
name: Update local user accounts and access controls
tasks:
- import_tasks: tasks/users-preprocessing.yml
- name: Create local user accounts
tags: users_create
become: true
block:
- name: Create groups
group:
name: "{{ item }}"
state: present
loop: "{{ targets + ['omni'] }}"
- name: Create users
user:
name: "{{ item.name }}"
comment: "{{ item.fullname | default('') }}"
shell: /bin/bash
groups: "{{ item.targets | intersect(targets) + ['omni'] }}"
system: "{{ item.svc | default(False) }}"
state: present
generate_ssh_key: "{{ True if generate_keys | bool == true else False }}"
ssh_key_comment: "{{ item.name }}@{{ inventory_hostname }}"
ssh_key_bits: 4096
ssh_key_type: ed25519
password: "{{ item.password }}"
loop: "{{ local_users }}"
- name: Delete removed user accounts
become: true
user:
name: "{{ item }}"
state: absent
loop: "{{ local_removed_users | difference(protected_users) }}"
- name: Grant sudo permissions to admin user accounts
become: true
user:
name: "{{ item.name }}"
groups: "{{ 'wheel' if ansible_os_family | lower == 'redhat' else 'sudo' }}"
state: present
loop: "{{ local_admin_users }}"
- name: Disable sudo password for ansible
become: true
lineinfile:
create: true
path: /etc/sudoers.d/30-ansible
line: "ansible ALL=(ALL) NOPASSWD:ALL"
mode: 0644
- name: Disable sudo password for admin users
become: true
lineinfile:
create: true
path: /etc/sudoers.d/40-admin
line: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
mode: 0644
state: "{{ 'absent' if disable_sudo_password | bool == false else 'present' }}"
loop: "{{ local_admin_users }}"
- name: Configure GNOME
tags: users_gnome
when: ansible_distribution == "Fedora" and disable_gnome_user_list | bool == true
become: true
block:
- name: Configure GDM profile
blockinfile:
create: true
path: /etc/dconf/profile/gdm
block: |
user-db:user
system-db:gdm
file-db:/usr/share/gdm/greeter-dconf-defaults
- name: Configure GDM keyfile
blockinfile:
create: true
path: /etc/dconf/db/gdm.d/00-login-screen
block: |
[org/gnome/login-screen]
# Do not show the user list
disable-user-list=true
- name: Delete existing user database
file:
path: /var/lib/gdm/.config/dconf/user
state: absent
- name: Restart dconf database
shell: dconf update
- name: Ensure proper ownership of user home directories
become: true
file:
group: "{{ item.name }}"
owner: "{{ item.name }}"
path: /home/{{ item.name }}
recurse: true
state: directory
loop: "{{ local_users }}"
# - hosts: router.net.enp.one
# name: Configure users on router
# connection: network_cli
# vars:
# ansible_network_os: edgeos
# tasks:
# - import_tasks: tasks/users-preprocessing.yml
#
# - name: Create users
# edgeos_config:
# lines:
# - set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
# - set system login user {{ item.name }} full-name "{{ item.fullname if item.fullname is defined else "" }}"
# - set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
# loop: "{{ local_users | difference([None]) }}"
#
# - name: Grant administrative access to admin users
# edgeos_config:
# lines:
# - set system login user {{ item.name }} level admin
# loop: "{{ local_admin_users | difference([None]) }}"
#
# - name: Assemble key files for loadkey usage
# edgeos_command:
# commands: sudo tee /tmp/{{ item.name }}.keys<<<"{{ item.sshkeys | join('\n') }}"
# loop: "{{ local_admin_users | difference([None]) }}"
#
# - import_playbook: deploy-sshkeys.yml

View File

@@ -1,34 +0,0 @@
---
- hosts: router.net.enp.one
name: Configure users on router
connection: network_cli
gather_facts: false
tasks:
- import_tasks: tasks/users-preprocessing.yml
- name: Create users
edgeos_config:
lines:
- set system login user {{ item.name }} authentication encrypted-password "{{ item.password }}"
- set system login user {{ item.name }} full-name "{{ item.fullname if item.fullname is defined else "" }}"
- set system login user {{ item.name }} level {{ 'operator' if item.name != 'ansible' else 'admin' }}
loop: "{{ local_users | difference([None]) }}"
- name: Grant administrative access to admin users
edgeos_config:
lines:
- set system login user {{ item.name }} level admin
with_items:
- "{{ local_admin_users | difference([None]) }}"
- name: Assemble loadkey files
edgeos_command:
commands:
- sudo tee "{{ item.sshkeys | join('\n') }}"<<</tmp/{{ item.name }}.keys
loop: "{{ local_admin_users | difference([None]) }}"
- name: Load keys
edgeos_config:
lines:
- loadkey {{ item }} /tmp/{{ item }}.keys
loop: "{{ local_admin_users | difference([None]) }}"

View File

@@ -1,5 +0,0 @@
---
- import_playbook: dependencies.yml
- import_playbook: update-system.yml
- import_playbook: update-users-local.yml

1509
poetry.lock generated Normal file

File diff suppressed because it is too large

23
pyproject.toml Normal file
View File

@@ -0,0 +1,23 @@
[tool.poetry]
name = "omni-ansible"
version = "0.0.0"
description = "Network deployment procedures and configuration state"
authors = ["Ethan Paul <me@enp.one>"]
license = "MIT"
[tool.poetry.dependencies]
python = "^3.7"
ansible = "^2.9.4"
docker = "^4.2.0"
docker-compose = "^1.25.4"
paramiko = "^2.7.1"
[tool.poetry.dev-dependencies]
ansible-lint = "^4.2.0"
ansible-toolbox = "^0.3"
pre-commit = "^2.9.2"
pre-commit-hooks = "^3.3.0"
safety = "^1.9.0"
tox = "^3.20.1"
tox-poetry-installer = "^0.5.2"
yamllint = "^1.20.0"

View File

@@ -0,0 +1,4 @@
alias doc='cd ~/Documents'
alias dn='cd ~/Downloads'
alias gg='cd ~/Git'
alias explorer='nautilus'

12
resources/bash/aliases.sh Normal file
View File

@@ -0,0 +1,12 @@
alias bk='cd -'
alias fuck='sudo $(history -p \!\!)'
alias ls='ls -lshF --color --group-directories-first --time-style=long-iso'
alias version='uname -orp && lsb_release -a | grep Description'
alias activate='source ./bin/activate'
alias cls='clear'
alias ls='/usr/bin/ls -lshF --color --group-directories-first --time-style=long-iso'
alias gmtime='/usr/bin/date -u --iso-8601=seconds'
alias date='/usr/bin/date --iso-8601=seconds'
alias whatismyip='curl https://icanhazip.com/'
alias uuid="python3 -c 'import uuid; print(uuid.uuid4());'"
alias epoch="python3 -c 'import time; print(time.time());'"

7
resources/bash/global.sh Normal file
View File

@@ -0,0 +1,7 @@
function _parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1="\[\e[0;97m\]\[\e[37m\e[1m\]\u\[\e[1;94m\]@\[\e[94m\]\H\[\e[0;33m\]\$(_parse_git_branch) \[\e[37m\]\w\[\e[33m\] \[\e[0;97m\]$\[\e[0m\] "
export rc=/home/$USERNAME/.bashrc
export VIRTUALENV_DIR=/home/$USERNAME/.venvs

18
resources/bash/helpers.sh Normal file
View File

@@ -0,0 +1,18 @@
random() {
if [[ $# -eq 0 ]]; then
num=32
else
num=$1
fi
cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $num | head -n 1
}
function up() { cd $(eval printf '../'%.0s {1..$1}); }
function pipin() { pip freeze | grep $1; }
function passhash() {
read -sp 'Password: ' tmppass;
echo $tmppass | python3 -c 'import crypt; print(crypt.crypt(input(), crypt.mksalt(crypt.METHOD_SHA512)));';
unset tmppass;
}
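
Example usage of the helpers above, assuming the file has been sourced (configure-env.yml installs it under `/etc/profile.d/`):

```bash
# Generate a 16-character random string (defaults to 32 characters with no argument)
random 16

# Jump three directories up, then grep the active environment's packages for 'requests'
up 3
pipin requests

# Interactively hash a password into SHA-512 crypt form, e.g. for the user module's
# password field / the omni_users_secrets values used by the accounts play
passhash
```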

76
resources/bash/pyenv.sh Normal file
View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
function pyenv () {
usage="Custom Python virtualenv manager
pyenv [list, delete, load, new] [VENV]
Commands:
list List existing virtualenvs (alias: 'ls')
load VENV Activate the virtualenv named VENV (alias: 'source')
new VENV [VERSION] Create and load a new virtualenv named VENV. Optionally VERSION
can be a python version to use for creating the venv. Note that
only python3 versions are supported.
delete VENV Delete the virtualenv named VENV (alias: 'rm')";
if [ $# -eq 0 ]; then
echo "Error: no command specified" >&2;
echo "$usage";
return 1;
fi;
case $1 in
"-h"| "--help")
echo "$usage";
return 0;;
"ls"| "list")
lsvenv "$VIRTUALENV_DIR";;
"rm"| "delete")
if [ $# -ne 2 ]; then
echo "Error: no virtualenv specified" >&2;
return 1;
fi;
rm --recursive --force "${VIRTUALENV_DIR:?}/$2";;
"source" | "load")
if [ $# -ne 2 ]; then
echo "Error: no virtualenv specified" >&2;
return 1;
fi;
# shellcheck source=/dev/null
source "$VIRTUALENV_DIR/$2/bin/activate";;
"new")
if [ $# -lt 2 ]; then
echo "Error: no virtualenv specified" >&2;
return 1;
fi;
if [ $# -eq 3 ]; then
version="$3";
else
version="3";
fi
if ! command -v "python$version" &>/dev/null; then
echo "Error: no interpreter found for python version '$version'" >&2;
return 2;
fi
if python$version -m venv "$VIRTUALENV_DIR/$2"; then
echo "New virtualenv '$2' created using $(command -v python$version)" >&2;
# shellcheck source=/dev/null
source "$VIRTUALENV_DIR/$2/bin/activate"
else
return $?;
fi;;
*)
echo "Error: unknown command '$1'" >&2;
echo "$usage";
return 1;;
esac
}
function lsvenv () {
venvs=()
for item in $(/usr/bin/ls -d "$1"/*/); do
if stat "${item}/bin/activate" &>/dev/null; then
venvs+=("$(basename "$item")");
fi
done
echo "${venvs[*]}"
}
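
Example usage of the `pyenv` helper above (not to be confused with the upstream pyenv project); a sketch assuming `$VIRTUALENV_DIR` is exported by `global.sh` and a `python3.9` interpreter is installed:

```bash
# Create (and automatically activate) a new venv under $VIRTUALENV_DIR using python3.9
pyenv new scratch 3.9

# List, reload, and finally delete it
pyenv ls
pyenv load scratch
deactivate
pyenv rm scratch
```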

7
resources/motd.j2 Normal file
View File

@@ -0,0 +1,7 @@
//////////// //// //// ///////////
//// ////// //// //// ////
//////// //// /// //// ///////////
//// //// ////// ////
//////////// //// //// {{ omni_description | default('Omni Network System') }}
_______________________________{{ omni_description | default('Omni Network System') | length * '\\' }}\

View File

@@ -0,0 +1,9 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
[NetDev]
Name={{ item.0.key }}
Kind=vlan
[VLAN]
Id={{ item.1 }}
# EOF

View File

@@ -0,0 +1,27 @@
# ANSIBLE MANAGED FILE - DO NOT EDIT
[Match]
Name={{ item.key }}
[Network]
DHCP={{ 'Yes' if item.value['dhcp'] | default(false) == true else 'No' }}
IPv6AcceptRA={{ 'Yes' if item.value['dhcp6'] | default(false) == true else 'No' }}
{% if item.value['addresses'] is defined %}
{% for ip_addr in item.value['addresses'] %}
Address={{ ip_addr }}
{% endfor %}
{% endif %}
{% if item.value['dns'] is defined %}
{% for dns_server in item.value['dns'] %}
DNS={{ dns_server }}
{% endfor %}
{% endif %}
{% if item.value['gateway'] is defined %}
Gateway={{ item.value['gateway'] }}
{% endif %}
{% if item.value['vlans'] is defined %}
{% for vlan_tag in item.value['vlans'] %}
VLAN={{ item.key }}.{{ vlan_tag }}
{% endfor %}
{% endif %}
# EOF

View File

@@ -1,45 +0,0 @@
---
- name: Configure firewall for NFS
become: true
firewalld:
immediate: yes
permanent: yes
service: nfs
state: enabled
zone: public
- name: Install NFS
become: true
when: ansible_distribution == "CentOS"
yum:
name: nfs-utils
state: latest
- name: Enable NFS server
become: true
service:
name: nfs-server
enabled: true
state: started
- name: Create exports directory
become: true
file:
path: /share
state: directory
- name: Symlink shares to exports directory
become: true
file:
dest: /share/{{ item.name }}
src: "{{ item.path }}"
state: link
- name: Modify /etc/exports
become: true
lineinfile:
path: /etc/exports
backup: yes
create: true
state: present
line: "/share/{{ item.name }} {{ item.access }}({{ item.permissions }})"

View File

@@ -1,31 +0,0 @@
- name: Check system compatibility
when: ansible_distribution != "CentOS" and ansible_distribution != "Red Hat Enterprise Linux"
meta: end_play
debug:
msg: "Hypervisor deployment is only supported on CentOS and RHEL"
- name: Temporarily disable IUS and EPEL repositories
become: true
command: mv /etc/yum.repos.d/{{ item }}.repo /etc/yum.repos.d/{{ item }}.repo.bak
with_items:
- ius
- epel
- name: Install OVirt repository
become: true
yum:
name: http://resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm
state: latest
- name: Install OVirt Engine
become: true
yum:
name: ovirt-engine
state: latest
- name: Re-enable IUS and EPEL repositories
become: true
command: mv /etc/yum.repos.d/{{ item }}.repo.bak /etc/yum.repos.d/{{ item }}.repo
with_items:
- ius
- epel

View File

@@ -1,65 +0,0 @@
---
# The dracut patch is an issue uniquely bound to the fact that I'm using several
# old-as-shit hardware RAID cards. Specifically the Dell PERC H200 and the Dell PERC
# H310, both of which had their hardware drivers dropped in Cent8 (despite the drivers
# being included in the upstream fedora kernel, but whatever). OS installation and the
# process in this set of tasks is based off of this blog post:
# https://www.centos.org/forums/viewtopic.php?t=71862#p302447
#
# TODO: Host the RPMs locally. The internet may never forget, but it's also never there
# when you need it
- name: Determine dracut version
shell:
cmd: rpm -qa | grep dracut-[0-9]
warn: false
register: dracut_version_check
- name: Install patched version of dracut
when: dracut_version_check.stdout != "dracut-049-13.git20190614.p1.el8_0.elrepo.x86_64"
block:
- name: Create temporary download directory
file:
path: /tmp/dracut-patch
state: directory
- name: Download patched dracut tool RPMs
get_url:
url: "{{ item.source }}"
dest: /tmp/dracut-patch/{{ item.dest }}
loop:
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-caps-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-caps.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-config-generic-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-config-generic.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-config-rescue-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-config-rescue.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-live-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-live.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-network-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-network.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-squash-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-squash.rpm
- source: http://elrepo.org/people/akemi/testing/el8/dracut/dracut-tools-049-13.git20190614.p1.el8_0.elrepo.x86_64.rpm
dest: dracut-tools.rpm
- name: Install patched dracut toolchain
become: true
dnf:
state: latest
name:
- /tmp/dracut-patch/dracut.rpm
- /tmp/dracut-patch/dracut-caps.rpm
- /tmp/dracut-patch/dracut-config-generic.rpm
- /tmp/dracut-patch/dracut-config-rescue.rpm
- /tmp/dracut-patch/dracut-live.rpm
- /tmp/dracut-patch/dracut-network.rpm
- /tmp/dracut-patch/dracut-squash.rpm
- /tmp/dracut-patch/dracut-tools.rpm
- name: Remove temporary download directory
file:
path: /tmp/dracut-patch
state: absent

View File

@@ -1,13 +0,0 @@
---
- name: Enable Extra Packages for Enterprise Linux
become: true
dnf:
state: latest
name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
- name: Enable the power tools repository
become: true
lineinfile:
path: /etc/yum.repos.d/CentOS-PowerTools.repo
regexp: "enabled=(0|1)"
line: "enabled=1"

View File

@@ -1,7 +0,0 @@
---
- name: Clean DNF cache
become: true
when: ansible_distribution == "Fedora" or ansible_distribution == "CentOS"
shell:
cmd: dnf clean all
warn: false

View File

@@ -1,23 +0,0 @@
---
- import_tasks: centos-repos.yml
when: ansible_distribution == "CentOS"
- import_tasks: clean.yml
when: clean | default(false) == true
- import_tasks: update.yml
when: update | default(false) == true
- name: Install packages on Fedora
become: true
when: ansible_distribution == "Fedora"
dnf:
state: latest
name: "{{ packages_global + packages_fedora }}"
- name: Install packages on CentOS
become: true
when: ansible_distribution == "CentOS"
dnf:
state: latest
name: "{{ packages_global + packages_centos }}"

View File

@@ -1,16 +0,0 @@
---
- import_tasks: centos-dracut.yml
when: ansible_distribution == "CentOS"
- name: Upgrade Fedora and CentOS packages
when: ansible_distribution == "CentOS" or ansible_distribution == "Fedora"
become: true
dnf:
state: latest
name: "*"
exclude: "{{ ','.join(exclude | default(['kernel*'])) }}"
# - name: Upgrade APT packages
# when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
# become: true
# apt:

View File

@@ -1,35 +0,0 @@
---
packages_global:
- automake
- cmake
- curl
- gcc
- gcc-c++
- git
- make
- nano
- openssl-devel
- systemd-devel
- unzip
- vim
- vim-minimal
packages_fedora:
- libselinux-python
- git-lfs
- readline-devel
- policycoreutils-python
- python-devel
- python-virtualenv
- python3-devel
packages_centos:
- bind-utils
- bash-completion
- nc
- nfs-utils
- python3
- python3-pip
- python3-setuptools
- python3-virtualenv
- wget

View File

@@ -1,7 +0,0 @@
//////////// //// //// ///////////
//// ////// //// //// ////
//////// //// /// //// ///////////
//// //// ////// ////
//////////// //// //// {{ description | default('Omni Network System') }}
_______________________________{{ description | default('Omni Network System') | length * '\\' }}\

View File

@@ -0,0 +1,61 @@
---
# This is a workaround for Cent8 removing drivers from the kernel that are required for
# my RAID cards to work. Kernel-Plus includes the drivers, thus one of the first things
# we need to do is to replace the kernel before doing an update.
- name: Replace default kernel with kernel-plus on CentOS 8
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
block:
- name: Disable kernel installation from base repository
lineinfile:
path: /etc/yum.repos.d/CentOS-Base.repo
line: exclude=kernel*
- name: Enable Centos-plus repository
lineinfile:
path: /etc/yum.repos.d/CentOS-centosplus.repo
regexp: "#?enabled=(0|1)"
line: enabled=1
- name: Enable kernel installation from plus repository
lineinfile:
path: /etc/yum.repos.d/CentOS-centosplus.repo
line: includepkgs=kernel*
# Note that the order of the next four tasks is very specific and intentional
# See this wiki page: https://plone.lucidsolutions.co.nz/linux/centos/7/install-centos-plus-kernel-kernel-plus/view
- name: Install kernel-plus
dnf:
state: "{{ _runtime_update_state }}"
name:
- kernel-plus
- kernel-plus-devel
register: _dnf_kernel_plus
- name: Uninstall kernel-tools
dnf:
name:
- kernel-tools
- kernel-tools-libs
state: absent
- name: Install kernel-plus-tools
dnf:
state: "{{ _runtime_update_state }}"
name:
- kernel-plus-tools
- kernel-plus-tools-libs
- name: Reboot into new kernel
when: _dnf_kernel_plus.changed is true and "centos.plus" not in ansible_kernel
reboot:
reboot_timeout: 3600
- name: Uninstall kernel
dnf:
state: absent
name:
- kernel
- kernel-devel
- kernel-core
- kernel-modules
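Not part of this file, but a post-reboot sanity check could look roughly like the sketch below; it assumes facts are re-gathered so that `ansible_kernel` reflects the newly booted kernel:

- name: Re-gather facts after the reboot
  setup:
- name: Confirm the centos-plus kernel is running
  assert:
    that:
      - "'centos.plus' in ansible_kernel"
    fail_msg: "Still running {{ ansible_kernel }}"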

View File

@@ -1,9 +0,0 @@
---
- name: Install python bindings using YUM
become: true
yum:
state: latest
name:
- libselinux-python
- policycoreutils-python
- python-firewall

View File

@@ -1,8 +0,0 @@
---
- name: Install systemd-networkd
become: true
yum:
state: latest
name:
- systemd-resolved
- systemd-networkd

View File

@@ -1,9 +0,0 @@
---
- name: Install global packages using YUM
become: true
yum:
state: latest
name: "{{ item }}"
with_items:
- "{{ packages_global }}"
- "{{ packages_yum }}"

View File

@@ -1,37 +0,0 @@
---
- name: Enable Extra Packages for Enterprise Linux
become: true
yum_repository:
name: epel
description: Extra Packages for Enterprise Linux
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Install Extra Packages for Enterprise Linux GPG key
become: true
rpm_key:
state: present
key: https://archive.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
- name: Enable Inline with Upstream Stable
become: true
yum:
state: latest
name: https://centos7.iuscommunity.org/ius-release.rpm
- name: Disable yum subscription-manager
become: true
lineinfile:
regex: enabled=1
line: enabled=0
path: /etc/yum/pluginconf.d/subscription-manager.conf
create: yes
state: present
- name: Disable yum repo report upload
become: true
lineinfile:
regex: enabled=1
line: enabled=0
path: /etc/yum/pluginconf.d/enabled_repos_upload.conf
create: yes
state: present

36
tasks/docker/install.yml Normal file
View File

@@ -0,0 +1,36 @@
---
# Just use the same repo for cent7 and cent8 because ¯\_(ツ)_/¯
- name: Install Docker repository
become: true
when: ansible_distribution == "CentOS"
yum_repository:
name: docker-ce-stable
description: Docker CE Stable - $basearch
file: docker-ce-stable
baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
gpgcheck: false
- name: Install Docker on Cent7
become: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
yum:
# Update the cache to update with the new docker repo
update_cache: true
state: "{{ _runtime_update_state }}"
name:
- device-mapper-persistent-data # Required for the docker devicemapper storage driver
- lvm2 # same
- docker-ce
- containerd.io
- name: Install Docker on Cent8
become: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
dnf:
# Update the cache to update with the new docker repo
update_cache: true
state: "{{ _runtime_update_state }}"
name:
- device-mapper-persistent-data # Required for the docker devicemapper storage driver
- lvm2 # same
- docker-ce-3:18.09.1-3.el7
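The diff stops at installation; the usual follow-up (shown here only as a hedged sketch, using the standard `docker` unit shipped by docker-ce, and not part of this file) would be to enable and start the engine:

- name: Enable and start the Docker engine
  become: true
  systemd:
    name: docker
    enabled: true
    state: started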

View File

@@ -1,9 +0,0 @@
---
- name: Install python bindings using DNF
become: true
dnf:
state: latest
name:
- libselinux-python
- policycoreutils-python
- python3-firewall

View File

@@ -1,8 +0,0 @@
---
- name: Install systemd-networkd
become: true
dnf:
state: latest
name:
- systemd-resolved
- systemd-networkd

View File

@@ -1,9 +0,0 @@
---
- name: Install global packages using DNF
become: true
dnf:
state: latest
name: "{{ item }}"
with_items:
- "{{ packages_global }}"
- "{{ packages_dnf }}"

View File

@@ -1,55 +0,0 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
become: true
file:
path: /etc/systemd/network
state: absent
- name: Create the networkd config directory
become: true
file:
path: /etc/systemd/network
state: directory
- name: Make .network files
become: true
template:
src: network.j2
dest: "/etc/systemd/network/{{ item.key }}.network"
with_dict: "{{ networking }}"
- name: Configure systemd services
become: true
block:
- name: Disable network scripts and NetworkManager
service:
name: "{{ item }}"
enabled: false
with_items:
- network
- NetworkManager
- NetworkManager-wait-online
- name: Enable systemd-networkd and systemd-resolved
service:
name: "{{ item }}"
enabled: true
state: started
with_items:
- systemd-networkd
- systemd-resolved
- systemd-networkd-wait-online
- name: Symlink so systemd-resolved uses /etc/resolv.conf
file:
dest: /etc/resolv.conf
src: /run/systemd/resolve/resolv.conf
state: link
force: true
setype: net_conf_t
- name: Symlink so /etc/resolv.conf uses systemd
file:
dest: /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
src: /usr/lib/systemd/system/systemd-resolved.service
state: link
force: true

View File

@@ -1,22 +0,0 @@
---
# The directory is deleted ahead of creation to ensure that no old configs
# remain after running ansible
- name: Delete networkd config directory
become: true
file:
path: /etc/systemd/network
state: absent
- name: Create the networkd config directory
become: true
file:
path: /etc/systemd/network
state: directory
- name: Make .network files
when: networking is defined
become: true
template:
src: network.j2
dest: "/etc/systemd/network/{{ item.key }}.network"
with_dict: "{{ networking }}"

View File

@@ -0,0 +1,24 @@
---
- name: Configure networking via systemd
become: true
when: omni_networking is defined
block:
- name: Create the networkd config directory
file:
path: /etc/systemd/network
state: directory
mode: 0755
- name: Make network files
template:
src: networkd/network.j2
dest: "/etc/systemd/network/{{ item.key }}.network"
mode: 0644
loop: "{{ omni_networking | dict2items }}"
- name: Make netdev files
template:
src: networkd/netdev.j2
dest: "/etc/systemd/network/{{ item.0.key + '.' + item.1 }}.netdev"
mode: 0644
loop: "{{ omni_networking | dict2items | subelements('value.vlans', true) }}"

View File

@@ -0,0 +1,26 @@
---
- name: Install systemd-networkd on CentOS 7
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
become: true
yum:
state: "{{ _runtime_update_state }}"
name:
- systemd-networkd
- systemd-resolved
- name: Install systemd-networkd on CentOS 8
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name: systemd-networkd
- name: Install systemd-networkd on Fedora
when: ansible_distribution == "Fedora" and ansible_distribution_major_version == "8"
become: true
dnf:
state: "{{ _runtime_update_state }}"
name:
- systemd-networkd
- systemd-resolved

View File

@@ -1,21 +1,19 @@
 ---
-- name: Disable network scripts and NetworkManager
+- name: Disable NetworkManager
   become: true
-  service:
+  systemd:
     name: "{{ item }}"
     enabled: false
-  with_items:
-    - network
+  loop:
     - NetworkManager
     - NetworkManager-wait-online
-- name: Enable systemd-networkd and systemd-resolved
+- name: Enable systemd-networkd
   become: true
-  service:
+  systemd:
     name: "{{ item }}"
     enabled: true
     state: started
-  with_items:
+  loop:
     - systemd-networkd
     - systemd-resolved
     - systemd-networkd-wait-online

17
tasks/packages/clean.yml Normal file
View File

@@ -0,0 +1,17 @@
---
# I'm honestly not sure why these 304 warnings are being raised by the linter here...
- name: Clean DNF cache # noqa: 304
when: ansible_distribution == "Fedora" or (ansible_distribution == "CentOS" and ansible_distribution_major_version == "8")
become: true
command:
cmd: /usr/bin/dnf clean all
warn: false
changed_when: true
- name: Clean YUM cache # noqa: 304
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
become: true
command:
cmd: /usr/bin/yum clean all
warn: false
changed_when: true

View File

@@ -0,0 +1,21 @@
# NOTE: This is currently horrifically broken. See the ongoing drama around
# systemd-networkd on cent8. Basically triggering an update- or an install- will give
# a conflict error due to the spicy-jankaroni-with-extra-cheese edition of
# systemd-networkd I'm running. We can exclude "systemd*", but we need to install
# systemd-devel, and then we get a package-not-found error. It's a truly stupid problem
# that will hopefully all go away when this bug gets fixed and systemd-networkd becomes
# available in EPEL:
# https://bugzilla.redhat.com/show_bug.cgi?id=1789146
- name: Install packages on CentOS 8
become: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
dnf:
state: "{{ _runtime_update_state }}"
name: "{{ omni_packages_global + omni_packages_centos_8 }}"
- name: Install packages on CentOS 7
become: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
yum:
state: "{{ _runtime_update_state }}"
name: "{{ omni_packages_global + omni_packages_centos_7 }}"

32
tasks/packages/repos.yml Normal file
View File

@@ -0,0 +1,32 @@
---
- name: Install repositories on CentOS 8
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
block:
- name: Enable Extra Packages for Enterprise Linux on CentOS 8
dnf:
state: present
name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
# The testing repo had to be enabled for a previous version of systemd-networkd
# to be installed
- name: Disable EPEL-Testing repository on CentOS 8
lineinfile:
path: /etc/yum.repos.d/epel-testing.repo
regexp: "enabled=(0|1)"
line: "enabled=0"
insertbefore: "^$"
firstmatch: true
- name: Enable the power tools repository on CentOS 8
lineinfile:
path: /etc/yum.repos.d/CentOS-PowerTools.repo
regexp: "enabled=(0|1)"
line: "enabled=1"
- name: Enable Extra Packages for Enterprise Linux on CentOS 7
become: true
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
yum:
state: present
name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

32
tasks/packages/update.yml Normal file
View File

@@ -0,0 +1,32 @@
---
# Ansible Lint 403 ("Package installs should not use latest") is silenced here because
# it would defeat the point otherwise
- name: Upgrade Fedora and CentOS 8 packages # noqa: 403
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "8"
become: true
dnf:
state: latest
name: "*"
exclude: "{{ ','.join(omni_pkg_exclude | default(['kernel*', 'docker-ce'])) }}"
- name: Upgrade CentOS 7 packages # noqa: 403
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
become: true
yum:
state: latest
name: "*"
exclude: "{{ ','.join(omni_pkg_exclude | default(['kernel*', 'docker-ce'])) }}"
- name: Upgrade Fedora packages # noqa: 403
when: ansible_distribution == "Fedora"
become: true
dnf:
state: latest
name: "*"
exclude: "{{ ','.join(omni_pkg_exclude | default(['kernel*', 'docker-ce'])) }}"
# Yeah I'll get here eventually
# - name: Upgrade APT packages
# when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
# become: true
# apt:
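To make the `exclude:` expression concrete: with `omni_pkg_exclude` left unset, the default list is joined into the single comma-separated string that yum/dnf expect, and overriding the variable swaps in a different list:

# "{{ ','.join(omni_pkg_exclude | default(['kernel*', 'docker-ce'])) }}"
# with the default list renders as:
exclude: "kernel*,docker-ce"
# while e.g. -e '{"omni_pkg_exclude": ["kernel*"]}' renders as:
exclude: "kernel*"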

Some files were not shown because too many files have changed in this diff.