diff --git a/.chglog/CHANGELOG.tpl.md b/.chglog/CHANGELOG.tpl.md
deleted file mode 100644
index 1af11b8438..0000000000
--- a/.chglog/CHANGELOG.tpl.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Change Log
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
-project adheres to [Semantic Versioning](http://semver.org/).
-
-{{ if .Versions -}}
-<a name="unreleased"></a>
-## [Unreleased]
-{{ if .Unreleased.CommitGroups -}}
-{{ range .Unreleased.CommitGroups -}}
-{{ .Title }}:
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-{{ else }}
-{{ range .Unreleased.Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst}}
-{{ end -}}
-{{ end }}
-{{ end -}}
-
-{{- if .Unreleased.NoteGroups -}}
-{{ range .Unreleased.NoteGroups -}}
-{{ .Title }}:
-{{ range .Notes -}}
-- {{ .Body }}
-{{ end }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
-
-{{ range .Versions }}
-<a name="{{ .Tag.Name }}"></a>
-## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }}
-{{ if .CommitGroups -}}
-{{ range .CommitGroups -}}
-{{ .Title }}:
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-{{ else }}
-{{ range .Commits -}}
-{{- if .Subject -}}
-- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
-{{ end -}}
-{{ end }}
-{{ end -}}
-
-{{- if .NoteGroups -}}
-{{ range .NoteGroups -}}
-{{ .Title }}:
-{{ range .Notes -}}
-- {{ .Body }}
-{{ end }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
-
-{{- if .Versions }}
-[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
-{{ range .Versions -}}
-{{ if .Tag.Previous -}}
-[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
-{{ end -}}
-{{ end -}}
-{{ end -}}
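For context on what is being removed here: `CHANGELOG.tpl.md` is a Go `text/template` that the changelog tooling (the `.chglog/` layout used by git-chglog) renders against commit data such as `.Versions`, `.CommitGroups`, and the `upperFirst` pipeline. The sketch below is not git-chglog code; it uses hypothetical stand-in types and a hand-rolled `upperFirst` only to show how one commit group from the removed template renders.

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

// Minimal stand-ins for the data the changelog generator feeds the removed
// template; the real types live in git-chglog, these are assumptions.
type Commit struct {
	Scope   string
	Subject string
}

type CommitGroup struct {
	Title   string
	Commits []Commit
}

// One commit-group block from the removed CHANGELOG.tpl.md.
const tpl = `{{ range .CommitGroups -}}
{{ .Title }}:
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject | upperFirst }}
{{ end }}
{{ end -}}`

func main() {
	t := template.Must(template.New("changelog").
		// upperFirst mirrors the pipeline used in the removed template.
		Funcs(template.FuncMap{"upperFirst": func(s string) string {
			if s == "" {
				return s
			}
			return strings.ToUpper(s[:1]) + s[1:]
		}}).
		Parse(tpl))

	data := map[string]interface{}{
		"CommitGroups": []CommitGroup{{
			Title:   "BUG FIXES",
			Commits: []Commit{{Scope: "workers", Subject: "handle empty subnet list"}},
		}},
	}
	_ = t.Execute(os.Stdout, data)
}
```

Running it prints a single `BUG FIXES:` section with the scope bolded, which is the shape of each version section the removed template produced.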
diff --git a/.chglog/config.yml b/.chglog/config.yml
deleted file mode 100644
index 06f2cfb375..0000000000
--- a/.chglog/config.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-style: github
-template: CHANGELOG.tpl.md
-info:
-  title: CHANGELOG
-  repository_url: https://github.com/terraform-aws-modules/terraform-aws-eks
-options:
-  commits:
-    sort_by: Type
-    filters:
-      Type:
-        - feat
-        - fix
-        - improvement
-        - docs
-        - refactor
-        - test
-        - ci
-
-  commit_groups:
-    group_by: Type
-    sort_by: Custom
-    title_order:
-      - feat
-      - improvement
-      - refactor
-      - fix
-      - docs
-      - test
-      - ci
-    title_maps:
-      feat: FEATURES
-      fix: BUG FIXES
-      improvement: ENHANCEMENTS
-      docs: DOCS
-      refactor: REFACTORS
-      test: TESTS
-      ci: CI
-
-  header:
-    pattern: "^(.+)\\s*:\\s*(.+)$"
-    pattern_maps:
-      - Type
-      - Subject
-
-  notes:
-    keywords:
-      - BREAKING CHANGES
-      - NOTES
-
-  refs:
-    actions:
-      - Closes
-      - Fixes
-      - Resolves
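The `header` section of the removed config is what turned a conventional-commit subject into the `Type` and `Subject` fields the template above consumed. A quick, hedged illustration of that regex using Go's `regexp` (not git-chglog's own parser), with a made-up commit subject:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// header.pattern from the removed .chglog/config.yml, YAML escaping undone.
	header := regexp.MustCompile(`^(.+)\s*:\s*(.+)$`)

	// Hypothetical commit subject, used only for illustration.
	subject := "fix: handle missing IAM path"

	if m := header.FindStringSubmatch(subject); m != nil {
		// pattern_maps assigns capture 1 to Type and capture 2 to Subject.
		fmt.Printf("Type=%q Subject=%q\n", m[1], m[2])
		// Prints: Type="fix" Subject="handle missing IAM path"
	}
}
```

The `filters.Type` list then kept only the prefixes shown above, and `title_maps` renamed them (for example `fix` becomes `BUG FIXES`) when the groups were rendered.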
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..176a458f94
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
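`* text=auto` asks git to detect text files itself and store them with LF line endings in the repository, so contributors on Windows and Unix do not churn CRLF/LF diffs. A rough conceptual sketch of the check-in normalization (not git's actual implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// normalize mirrors, very roughly, what `* text=auto` implies at commit time
// for files git classifies as text: CRLF becomes LF in the stored blob.
// Checkout conversion still depends on core.autocrlf / core.eol settings.
func normalize(content string) string {
	return strings.ReplaceAll(content, "\r\n", "\n")
}

func main() {
	fmt.Printf("%q\n", normalize("variable \"cluster_name\" {\r\n}\r\n"))
	// Prints: "variable \"cluster_name\" {\n}\n"
}
```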
diff --git a/CHANGELOG.pre-v11.0.0.md b/.github/CHANGELOG.pre-v11.0.0.md
similarity index 100%
rename from CHANGELOG.pre-v11.0.0.md
rename to .github/CHANGELOG.pre-v11.0.0.md
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
deleted file mode 100644
index 65df33743e..0000000000
--- a/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Contributing
-
-When contributing to this repository, please first discuss the change you wish to make via issue,
-email, or any other method with the owners of this repository before making a change.
-
-Please note we have a code of conduct, please follow it in all your interactions with the project.
-
-## Pull Request Process
-
-1.  Ensure any install or build dependencies are removed before the end of the layer when doing a build.
-2.  Update the README.md with details of changes to the interface, this includes new environment variables, exposed ports, useful file locations, and container parameters.
-3. Once all outstanding comments and checklist items have been addressed, your contribution will be merged! Merged PRs will be included in the next release. The terraform-aws-eks maintainers take care of updating the CHANGELOG as they merge.
-
-## Checklists for contributions
-
-- [ ] Add [semantics prefix](#semantic-pull-requests) to your PR or Commits (at least one of your commit groups)
-- [ ] CI tests are passing
-- [ ] README.md has been updated after any changes to variables and outputs. See https://github.com/terraform-aws-modules/terraform-aws-eks/#doc-generation
-
-## Semantic Pull Requests
-
-To generate changelog, Pull Requests or Commits must have semantic and must follow conventional specs below:
-
-- `feat:` for new features
-- `fix:` for bug fixes
-- `improvement:` for enhancements
-- `docs:` for documentation and examples
-- `refactor:` for code refactoring
-- `test:` for tests
-- `ci:` for CI purpose
-- `chore:` for chores stuff
-
-The `chore` prefix skipped during changelog generation. It can be used for `chore: update changelog` commit message by example.
-
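The removed CONTRIBUTING.md was the only place the semantic-prefix rule was spelled out: the prefixes it lists are valid for PRs and commits, and `chore:` is accepted but skipped when the changelog is generated. A small sketch of that convention; the `classify` helper and the sample subjects are illustrative, not part of any tooling in this repository:

```go
package main

import (
	"fmt"
	"strings"
)

// Prefixes listed in the removed CONTRIBUTING.md. All are valid; only
// "chore" is excluded from changelog generation.
var semanticPrefixes = []string{"feat", "fix", "improvement", "docs", "refactor", "test", "ci", "chore"}

// classify is a hypothetical helper: it returns the prefix of a commit
// subject and whether that commit would appear in the changelog.
func classify(subject string) (prefix string, inChangelog bool, ok bool) {
	for _, p := range semanticPrefixes {
		if strings.HasPrefix(subject, p+":") || strings.HasPrefix(subject, p+"(") {
			return p, p != "chore", true
		}
	}
	return "", false, false
}

func main() {
	for _, s := range []string{
		"feat: add Fargate profile support", // hypothetical example
		"chore: update changelog",
		"update readme", // missing prefix
	} {
		p, in, ok := classify(s)
		fmt.Println(s, "->", p, in, ok)
	}
}
```

The `chore: update changelog` case matches the example the removed file gives: a valid prefix that changelog generation ignores.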
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index bcb7188d94..0000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# I have issues
-
-## I'm submitting a...
-
-* [ ] bug report
-* [ ] feature request
-* [ ] support request - read the [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) first!
-* [ ] kudos, thank you, warm fuzzy
-
-## What is the current behavior?
-
-
-
-## If this is a bug, how to reproduce? Please include a code sample if relevant.
-
-
-
-## What's the expected behavior?
-
-
-
-## Are you able to fix this problem and submit a PR? Link here if you have already.
-
-## Environment details
-
-* Affected module version:
-* OS:
-* Terraform version:
-
-## Any other relevant info
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index ce48f0220a..0000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# PR o'clock
-
-## Description
-
-Please explain the changes you made here and link to any relevant issues.
-
-### Checklist
-
-- [ ] README.md has been updated after any changes to variables and outputs. See https://github.com/terraform-aws-modules/terraform-aws-eks/#doc-generation
diff --git a/.github/images/security_groups.svg b/.github/images/security_groups.svg
new file mode 100644
index 0000000000..6012962597
--- /dev/null
+++ b/.github/images/security_groups.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:lucid="lucid" width="1167.42" height="896.37"><g transform="translate(-292.58024691358025 -230)" lucid:page-tab-id="0_0"><path d="M0 0h1760v1360H0z" fill="#fff"/><path d="M820 380h120v120H820V380z" stroke="#000" stroke-opacity="0" fill="url(#a)"/><path d="M914.6 457.1c.57.32.94.9.94 1.55 0 .66-.3 1.24-.88 1.65l-33.7 20.6c-.35.18-.64.26-1 .26-.3 0-.67-.08-.96-.25l-33.76-20.6c-.5-.33-.88-1-.88-1.65v-39.32c0-.66.37-1.24.88-1.57l29.96-18.8c.58-.32 1.32-.32 1.9 0 .58.34 1.02.9 1.02 1.66v16.8c0 .67-.36 1.25-.95 1.66l-14.1 8.74v22.26l16.95 10.22 19.66-10.8c.6-.24 1.24-.24 1.83.1zm-34.65 20.02L910 458.74l-9.44-5.44-19.73 10.8c-.6.32-1.25.24-1.83-.1l-18.7-11.2c-.6-.33-.96-1-.96-1.65v-24.32c0-.66.37-1.23.88-1.56l14.1-8.83V404l-26.23 16.4v37.26zm36.54-59.44c.57.33.94 1 .94 1.65v31.82c0 .66-.37 1.32-.88 1.65-.3.17-.65.25-1.02.25-.3 0-.5-.08-.8-.17l-15-7.5c-.64-.33-1-1-1-1.73V427.9l-15.94-8.82c-.58-.33-.95-.9-.95-1.65v-16.8c0-.67.37-1.33.95-1.66.58-.33 1.32-.33 1.83 0zm-2.8 30.5V420.4l-28.12-16.57v12.53l15.93 8.82c.6.33.96 1 .96 1.65v15.67zm-23-21.35l-9.5 11.87 9.93 12.53h-4.16l-8.85-11.3v11.22h-3.8v-24.32h3.8v9.8l8.33-9.8z" stroke="#000" stroke-opacity="0" fill="#fff"/><use xlink:href="#b" transform="matrix(1,0,0,1,790,509) translate(72 14.4)"/><path d="M700 266a6 6 0 0 1 6-6h368a6 6 0 0 1 6 6v308a6 6 0 0 1-6 6H706a6 6 0 0 1-6-6z" stroke="#dd3522" stroke-width="3" fill="#fff" fill-opacity="0"/><use xlink:href="#c" transform="matrix(1,0,0,1,700,280) translate(99.15 14.4)"/><use xlink:href="#d" transform="matrix(1,0,0,1,700,280) translate(161 14.4)"/><use xlink:href="#e" transform="matrix(1,0,0,1,700,280) translate(230.9 14.4)"/><path d="M460 709a6 6 0 0 1 6-6h828a6 6 0 0 1 6 6v248a6 6 0 0 1-6 6H466a6 6 0 0 1-6-6z" stroke="#dd3522" stroke-width="3" fill="#fff" fill-opacity="0"/><use xlink:href="#f" transform="matrix(1,0,0,1,460,723) translate(336.1 14.4)"/><use xlink:href="#d" transform="matrix(1,0,0,1,460,723) translate(384.05 14.4)"/><use xlink:href="#e" transform="matrix(1,0,0,1,460,723) translate(453.95000000000005 14.4)"/><path d="M740 326a6 6 0 0 1 6-6h288a6 6 0 0 1 6 6v208a6 6 0 0 1-6 6H746a6 6 0 0 1-6-6z" stroke="#dd3522" stroke-width="3" fill="#fff" fill-opacity="0"/><use xlink:href="#g" transform="matrix(1,0,0,1,740,340) translate(22.24999999999997 14.4)"/><use xlink:href="#h" transform="matrix(1,0,0,1,740,340) translate(91.09999999999998 14.4)"/><use xlink:href="#d" transform="matrix(1,0,0,1,740,340) translate(157.9 14.4)"/><use xlink:href="#e" transform="matrix(1,0,0,1,740,340) translate(227.8 14.4)"/><path d="M798.37 804H658.5v-2h139.87zm303.13 0H961.63v-2h139.87z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M657.5 807.64L643.25 803l14.27-4.64z" fill="#5e5e5e"/><path d="M658.5 809l-18.5-6 18.5-6zm-12.02-6l10.03 3.26v-6.52z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M1116.76 803l-14.26 4.64v-9.28z" fill="#5e5e5e"/><path d="M1120 803l-18.5 6v-12zm-16.5 3.26l10.03-3.26-10.03-3.26z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><use xlink:href="#i" transform="matrix(1,0,0,1,798.3739703343706,792.3333333333335) translate(0 14.222222222222223)"/><use xlink:href="#j" transform="matrix(1,0,0,1,798.3739703343706,792.3333333333335) translate(43.35802469135803 14.222222222222223)"/><use xlink:href="#k" transform="matrix(1,0,0,1,798.3739703343706,792.3333333333335) translate(83.40740740740742 
14.222222222222223)"/><use xlink:href="#l" transform="matrix(1,0,0,1,798.3739703343706,792.3333333333335) translate(101.13580246913583 14.222222222222223)"/><path d="M767.14 843.2l-108.62.68-.02-2 108.64-.67zm334.36-2.08l-108.63.67v-2l108.62-.68z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M657.54 847.53l-14.3-4.55 14.24-4.72z" fill="#5e5e5e"/><path d="M658.55 848.9L640 843l18.47-6.13zm-12.07-5.94l10.05 3.2-.04-6.52z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M1116.76 840.02l-14.23 4.72-.06-9.27z" fill="#5e5e5e"/><path d="M1120 840l-18.46 6.13-.08-12.03zm-16.48 3.36l10-3.32-10.04-3.2z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><use xlink:href="#i" transform="matrix(1,0,0,1,767.1394024331361,830.8333108335584) translate(0 14.222222222222223)"/><use xlink:href="#m" transform="matrix(1,0,0,1,767.1394024331361,830.8333108335584) translate(43.35802469135803 14.222222222222223)"/><use xlink:href="#n" transform="matrix(1,0,0,1,767.1394024331361,830.8333108335584) translate(183.4567901234568 14.222222222222223)"/><path d="M600 726a6 6 0 0 1 6-6h528a6 6 0 0 1 6 6v66.27a6 6 0 0 1-6 6H606a6 6 0 0 1-6-6z" stroke="#000" stroke-opacity="0" stroke-width="3" fill="#fff" fill-opacity="0"/><use xlink:href="#o" transform="matrix(1,0,0,1,605,725) translate(50.15493827160495 39.34027777777778)"/><use xlink:href="#p" transform="matrix(1,0,0,1,605,725) translate(92.06851851851853 39.34027777777778)"/><use xlink:href="#q" transform="matrix(1,0,0,1,605,725) translate(105.58703703703705 39.34027777777778)"/><use xlink:href="#r" transform="matrix(1,0,0,1,605,725) translate(119.1672839506173 39.34027777777778)"/><use xlink:href="#s" transform="matrix(1,0,0,1,605,725) translate(408.633950617284 39.34027777777778)"/><use xlink:href="#t" transform="matrix(1,0,0,1,605,725) translate(418.26358024691353 39.34027777777778)"/><use xlink:href="#u" transform="matrix(1,0,0,1,605,725) translate(474.06728395061725 39.34027777777778)"/><a xlink:href="http://kubernetes.io/cluster/&lt;cluster-name&gt;" target="_blank" transform="matrix(1,0,0,1,605,725)"><path class="lucid-link lucid-hotspot lucid-overlay-hotspot" fill-opacity="0" d="M119.17 21.56h284.65v26.67H119.17z"/></a><path d="M880.55 983.2h-2v-50.13h2zm0-71.47h-2V880h2z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M894.55 882h-30v-2h30z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M879.55 998.48l-4.64-14.27h9.28z" fill="#5e5e5e"/><path d="M879.55 1001.7l-6-18.5h12zm-3.26-16.5l3.25 10.04 3.26-10.03z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><use xlink:href="#v" transform="matrix(1,0,0,1,804.6345034421599,911.7333333333333) translate(0 14.222222222222223)"/><use xlink:href="#w" transform="matrix(1,0,0,1,804.6345034421599,911.7333333333333) translate(33.48148148148149 14.222222222222223)"/><use xlink:href="#x" transform="matrix(1,0,0,1,804.6345034421599,911.7333333333333) translate(107.55555555555557 14.222222222222223)"/><path d="M904.4 1050.7h-8.48v-3.4h8.48c8.16 0 12.08-3.6 12.08-11 0-6.34-3.68-10.34-11.04-11.8-.8-.16-1.36-.82-1.36-1.58-.4-6.14-3.92-8.37-6.8-8.37-2.08.05-3.84.96-5.12 2.64-.56.7-1.6.85-2.4.3-.24-.2-.4-.46-.56-.77-1.04-3.14-2.56-5.53-4.96-7.86-5.36-5.42-12.96-6.9-20-3.85-6.32 2.9-10.24 8.72-10.48 15.72 0 .7.08 1.3.16 2.08.08.86-.48 1.62-1.28 1.82-3.44.86-9.28 3.45-9.28 11.46v.8c.4 6.04 5.44 10.35 12.08 10.35h8.48v3.4h-8.48c-8.48 0-14.96-5.68-15.44-13.5v-1.05c-.24-6.7 3.92-12.52 10.4-14.35v-1.06c.24-11.68 9.92-20.95 21.6-20.7 5.68.15 
10.56 2.23 14.64 6.4 2.08 2.02 3.52 4.1 4.72 6.78 1.76-1.37 3.68-1.98 5.92-2.03 4.48 0 9.2 3.2 10.16 10.25 8.08 2.07 12.56 7.3 12.56 14.85-.08 9.33-5.6 14.45-15.6 14.45zm-18.08 0H873.6v-3.4h12.72z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M891.1 1049.1c-.47 0-.86-.16-1.18-.5l-9.58-9.57 2.4-2.42 8.36 8.33 8.37-8.38 2.4 2.42-9.57 9.56c-.32.37-.73.56-1.2.57z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M889.4 1021.85h3.4v23.95h-3.4v-23.95zM877.17 1032.65l-8.38-8.38-8.38 8.38-2.4-2.42 9.57-9.56c.66-.67 1.7-.67 2.38 0l9.58 9.56z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M867.1 1021.85h3.4v28.75h-3.4v-28.75z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M904.4 1050.7h-48.95c-8.48 0-14.95-5.68-15.43-13.48v-1.07c-.24-6.74 3.92-12.5 10.4-14.4v-1.05-.8c.55-8.12 5.2-14.7 12.55-18.06 8.23-3.54 17.27-1.82 23.6 4.57 2.15 2.08 3.6 4.16 4.8 6.8 1.74-1.32 3.66-1.98 5.9-1.98 4.48 0 9.2 3.2 10.16 10.24 8.15 2.02 12.55 7.3 12.55 14.85-.08 9.28-5.6 14.4-15.6 14.4zm-32.96-47.18c-2.63 0-4.87.45-7.27 1.47-6.32 2.93-10.16 8.75-10.4 15.7 0 .76 0 1.42.16 2.13.08.86-.48 1.67-1.28 1.87-3.44.86-9.27 3.4-9.27 11.45v.92c.32 5.93 5.43 10.18 12.07 10.18h48.94c8.15 0 12.06-3.6 12.06-11 0-6.33-3.67-10.33-11.03-11.8-.8-.15-1.28-.8-1.36-1.57-.32-6.2-3.92-8.37-6.8-8.37-2.08.07-3.84.98-5.12 2.65-.55.7-1.6.86-2.4.3-.23-.2-.4-.45-.55-.76-1.04-3.1-2.56-5.53-4.96-7.86-3.52-3.55-7.76-5.32-12.8-5.32z" stroke="#fff" stroke-opacity="0" fill="#fff" fill-opacity="0"/><use xlink:href="#y" transform="matrix(1,0,0,1,810,1059.7) translate(39.81481481481482 17.77777777777778)"/><use xlink:href="#z" transform="matrix(1,0,0,1,810,1059.7) translate(32.376543209876544 44.44444444444444)"/><path d="M1100 334.33a6 6 0 0 1 6-6h328a6 6 0 0 1 6 6v211.34a6 6 0 0 1-6 6h-328a6 6 0 0 1-6-6z" stroke="#000" stroke-opacity="0" stroke-width="3" fill="#fff" fill-opacity="0"/><use xlink:href="#A" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 49.4375)"/><use xlink:href="#B" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(76.5 49.4375)"/><use xlink:href="#C" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(150.7222222222222 49.4375)"/><use xlink:href="#D" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(228.38888888888886 49.4375)"/><use xlink:href="#E" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(289.4444444444444 49.4375)"/><use xlink:href="#F" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(309.38888888888886 49.4375)"/><use xlink:href="#G" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 73.4375)"/><use xlink:href="#H" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(74.33333333333331 73.4375)"/><use xlink:href="#I" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(130.9444444444444 73.4375)"/><use xlink:href="#J" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(252.9444444444444 73.4375)"/><use xlink:href="#K" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 97.4375)"/><use xlink:href="#L" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(28.83333333333333 97.4375)"/><use xlink:href="#M" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(62.16666666666666 97.4375)"/><use xlink:href="#N" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(126.49999999999997 97.4375)"/><use xlink:href="#O" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(153.16666666666663 97.4375)"/><use xlink:href="#P" 
transform="matrix(1,0,0,1,1105,333.3333333333333) translate(204.27777777777774 97.4375)"/><use xlink:href="#Q" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(247.49999999999994 97.4375)"/><use xlink:href="#H" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 121.4375)"/><use xlink:href="#R" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(56.6111111111111 121.4375)"/><use xlink:href="#E" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(82.11111111111109 121.4375)"/><use xlink:href="#S" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(102.05555555555553 121.4375)"/><use xlink:href="#T" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(165.3333333333333 121.4375)"/><use xlink:href="#L" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(186.38888888888883 121.4375)"/><use xlink:href="#U" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(219.72222222222217 121.4375)"/><use xlink:href="#V" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 147.21527777777777)"/><use xlink:href="#W" transform="matrix(1,0,0,1,1105,333.3333333333333) translate(0 170.95572916666666)"/><path d="M340 666a6 6 0 0 1 6-6h68a6 6 0 0 1 6 6v328a6 6 0 0 1-6 6h-68a6 6 0 0 1-6-6z" fill="none"/><path d="M420 1000c-5.52 0-10-4.48-10-10V840c0-5.52-4.48-10-10-10 5.52 0 10-4.48 10-10V670c0-5.52 4.48-10 10-10" stroke="#008a0e" stroke-width="3" fill="none"/><g><use xlink:href="#X" transform="matrix(1,0,0,1,345,665) translate(-6.851851851851848 156.11111111111111)"/><use xlink:href="#Y" transform="matrix(1,0,0,1,345,665) translate(-16.728395061728385 182.77777777777777)"/></g><path d="M340 256a6 6 0 0 1 6-6h68a6 6 0 0 1 6 6v328a6 6 0 0 1-6 6h-68a6 6 0 0 1-6-6z" fill="none"/><path d="M420 590c-5.52 0-10-4.48-10-10V430c0-5.52-4.48-10-10-10 5.52 0 10-4.48 10-10V260c0-5.52 4.48-10 10-10" stroke="#1071e5" stroke-width="3" fill="none"/><g><use xlink:href="#Z" transform="matrix(1,0,0,1,345,255) translate(-31.41975308641976 156.11111111111111)"/><use xlink:href="#aa" transform="matrix(1,0,0,1,345,255) translate(-16.728395061728385 182.77777777777777)"/></g><path d="M1022.5 683.02h-2v-26.5h2zm0-69.16h-2v-32.42h2z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M1036.5 583.44h-30v-2h30z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M1021.5 698.3l-4.65-14.28h9.27z" fill="#5e5e5e"/><path d="M1021.5 701.52l-6.03-18.5h12.03zm-3.27-16.5l3.26 10.03 3.24-10.03z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><g><use xlink:href="#ab" transform="matrix(1,0,0,1,976.5967152972673,613.8625717815738) translate(0.005000000000002558 14.222222222222223)"/><use xlink:href="#ac" transform="matrix(1,0,0,1,976.5967152972673,613.8625717815738) translate(6.523518518518522 35.55555555555556)"/></g><path d="M891 683.03h-2v-30.86h2zm0-52.2h-2v-30.86h2z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M894.64 598.97h-9.28L890 584.7z" fill="#5e5e5e"/><path d="M896 599.97h-12l6-18.5zm-9.26-2h6.52L890 587.94z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M890 698.3l-4.64-14.27h9.28z" fill="#5e5e5e"/><path d="M890 701.54l-6-18.5h12zm-3.26-16.5l3.26 10.02 3.26-10.03z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><g><use xlink:href="#ad" transform="matrix(1,0,0,1,854.9876543209876,630.8329133647177) translate(0 14.222222222222223)"/></g><path d="M572.7 431H553.7v-2h18.97zm165.84 0H675.3v-2h63.24z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M738.54 445h-2v-30h2z" stroke="#5e5e5e" 
stroke-width=".05" fill="#5e5e5e"/><path d="M552.72 434.64L538.46 430l14.26-4.64z" fill="#5e5e5e"/><path d="M553.72 436l-18.5-6 18.5-6zm-12.03-6l10.02 3.26v-6.52z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><g><use xlink:href="#v" transform="matrix(1,0,0,1,572.6913580246913,419.3333333333333) translate(0 14.222222222222223)"/><use xlink:href="#ae" transform="matrix(1,0,0,1,572.6913580246913,419.3333333333333) translate(33.48148148148149 14.222222222222223)"/></g><path d="M524.4 455.35h-8.48v-3.4h8.48c8.16 0 12.08-3.6 12.08-11 0-6.34-3.68-10.34-11.04-11.8-.8-.16-1.36-.82-1.36-1.58-.4-6.14-3.92-8.37-6.8-8.37-2.08.05-3.84.96-5.12 2.64-.56.7-1.6.86-2.4.3-.24-.2-.4-.45-.56-.76-1.04-3.14-2.56-5.53-4.96-7.86-5.36-5.42-12.96-6.9-20-3.85-6.32 2.9-10.24 8.72-10.48 15.72 0 .7.08 1.3.16 2.06.08.87-.48 1.63-1.28 1.83-3.44.85-9.28 3.44-9.28 11.45v.8c.4 6.04 5.44 10.35 12.08 10.35h8.48v3.4h-8.48c-8.48 0-14.96-5.68-15.44-13.5v-1.05c-.24-6.7 3.92-12.52 10.4-14.35v-1.06c.24-11.67 9.92-20.94 21.6-20.7 5.68.16 10.56 2.24 14.64 6.4 2.08 2.03 3.52 4.1 4.72 6.8 1.76-1.38 3.68-2 5.92-2.04 4.48 0 9.2 3.2 10.16 10.25 8.08 2.07 12.56 7.3 12.56 14.85-.08 9.33-5.6 14.45-15.6 14.45zm-18.08 0H493.6v-3.4h12.72z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M511.1 453.75c-.47 0-.86-.16-1.18-.5l-9.58-9.57 2.4-2.42 8.36 8.32 8.37-8.38 2.4 2.42-9.57 9.56c-.32.37-.73.56-1.2.57z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M509.4 426.5h3.4v23.95h-3.4V426.5zM497.17 437.3l-8.38-8.38-8.38 8.38-2.4-2.42 9.57-9.56c.66-.67 1.7-.67 2.38 0l9.58 9.56z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M487.1 426.5h3.4v28.75h-3.4V426.5z" stroke="#fff" stroke-opacity="0" fill="#232f3e"/><path d="M524.4 455.35h-48.95c-8.48 0-14.95-5.68-15.43-13.48v-1.07c-.24-6.74 3.92-12.5 10.4-14.4v-1.05-.8c.55-8.12 5.2-14.7 12.55-18.06 8.23-3.55 17.27-1.83 23.6 4.56 2.15 2.07 3.6 4.15 4.8 6.8 1.74-1.33 3.66-2 5.9-2 4.48 0 9.2 3.2 10.16 10.25 8.15 2.03 12.55 7.3 12.55 14.86-.08 9.27-5.6 14.4-15.6 14.4zm-32.96-47.18c-2.63 0-4.87.45-7.27 1.47-6.32 2.94-10.16 8.76-10.4 15.7 0 .77 0 1.43.16 2.14.08.86-.48 1.67-1.28 1.87-3.44.86-9.27 3.4-9.27 11.45v.92c.32 5.93 5.43 10.18 12.07 10.18h48.94c8.15 0 12.06-3.6 12.06-11 0-6.33-3.67-10.33-11.03-11.8-.8-.15-1.28-.8-1.36-1.57-.32-6.2-3.92-8.37-6.8-8.37-2.08.06-3.84.97-5.12 2.64-.55.7-1.6.86-2.4.3-.23-.2-.4-.45-.55-.76-1.04-3.1-2.56-5.52-4.96-7.85-3.52-3.56-7.76-5.33-12.8-5.33z" stroke="#fff" stroke-opacity="0" fill="#fff" fill-opacity="0"/><g><use xlink:href="#af" transform="matrix(1,0,0,1,430,464.34999999999997) translate(39.81481481481482 17.77777777777778)"/><use xlink:href="#ag" transform="matrix(1,0,0,1,430,464.34999999999997) translate(32.376543209876544 44.44444444444444)"/></g><path d="M761.05 683.05h-2v-17.07h2zm0-81.07h-2v-20.5h2z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M775.05 583.5h-30v-2h30z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><path d="M760.05 698.3l-4.63-14.25h9.27z" fill="#5e5e5e"/><path d="M760.05 701.55l-6-18.5h12zm-3.26-16.5l3.25 10.03 3.26-10.03z" stroke="#5e5e5e" stroke-width=".05" fill="#5e5e5e"/><g><use xlink:href="#ah" transform="matrix(1,0,0,1,720.1025860778699,601.9836991891153) translate(0.005000000000002558 14.222222222222223)"/><use xlink:href="#ai" transform="matrix(1,0,0,1,720.1025860778699,601.9836991891153) translate(0.005000000000002558 35.55555555555556)"/><use xlink:href="#aj" transform="matrix(1,0,0,1,720.1025860778699,601.9836991891153) translate(18.81981481481482 56.88888888888889)"/></g><path 
d="M1274.12 827.24c3.24 0 5.88 2.64 5.88 5.88v51.24c0 3.12-2.52 5.64-5.64 5.64h-51.48c-3.12 0-5.64-2.52-5.64-5.64v-51.24c0-3.24 2.64-5.88 5.88-5.88zm.24 57.24l.12-51.36c0-.24-.12-.36-.36-.36h-51c-.24 0-.36.12-.36.36v51.24zm-82.32-28.56h20.64v5.4h-20.64c-3.12 0-5.76-2.52-5.76-5.76v-51.12c0-3.24 2.64-5.76 5.76-5.76h51.12c3.24 0 5.88 2.52 5.88 5.76v18.36h-5.52v-18.36c0-.24-.12-.36-.36-.36h-51.12c-.12 0-.36.12-.36.36v51.12c0 .24.24.36.36.36zm-26.16-28.68h15.96v5.52h-15.96c-3.24 0-5.88-2.64-5.88-5.88v-51c0-3.24 2.64-5.88 5.88-5.88h51c3.24 0 5.88 2.64 5.88 5.88v18.36h-5.52v-18.36c0-.24-.12-.36-.36-.36h-51c-.24 0-.36.12-.36.36v51c0 .24.12.36.36.36z" stroke="#000" stroke-opacity="0" fill="#d45b07"/><path d="M1163.54 773.5v56.58H1189v28.58h31.46v28.23h56.24v-56.6h-31.35v-28.23h-25.57V773.5z" stroke="#000" stroke-opacity="0" fill-opacity="0"/><g><use xlink:href="#ak" transform="matrix(1,0,0,1,1130,899) translate(41.05 14.4)"/><use xlink:href="#al" transform="matrix(1,0,0,1,1130,899) translate(89 14.4)"/></g><path d="M594.12 830.24c3.24 0 5.88 2.64 5.88 5.88v51.24c0 3.12-2.52 5.64-5.64 5.64h-51.48c-3.12 0-5.64-2.52-5.64-5.64v-51.24c0-3.24 2.64-5.88 5.88-5.88zm.24 57.24l.12-51.36c0-.24-.12-.36-.36-.36h-51c-.24 0-.36.12-.36.36v51.24zm-82.32-28.56h20.64v5.4h-20.64c-3.12 0-5.76-2.52-5.76-5.76v-51.12c0-3.24 2.64-5.76 5.76-5.76h51.12c3.24 0 5.88 2.52 5.88 5.76v18.36h-5.52v-18.36c0-.24-.12-.36-.36-.36h-51.12c-.12 0-.36.12-.36.36v51.12c0 .24.24.36.36.36zm-26.16-28.68h15.96v5.52h-15.96c-3.24 0-5.88-2.64-5.88-5.88v-51c0-3.24 2.64-5.88 5.88-5.88h51c3.24 0 5.88 2.64 5.88 5.88v18.36h-5.52v-18.36c0-.24-.12-.36-.36-.36h-51c-.24 0-.36.12-.36.36v51c0 .24.12.36.36.36z" stroke="#000" stroke-opacity="0" fill="#d45b07"/><path d="M483.54 776.5v56.58H509v28.58h31.46v28.23h56.24v-56.6h-31.35v-28.23h-25.57V776.5z" stroke="#000" stroke-opacity="0" fill-opacity="0"/><g><use xlink:href="#ak" transform="matrix(1,0,0,1,450,902) translate(41.05 14.4)"/><use xlink:href="#al" transform="matrix(1,0,0,1,450,902) translate(89 14.4)"/></g><defs><linearGradient gradientUnits="userSpaceOnUse" id="a" x1="820" y1="500" x2="940" y2="380"><stop offset="0%" stop-color="#c8511b"/><stop offset="100%" stop-color="#f90"/></linearGradient><path d="M30 0v-248h187v28H63v79h144v27H63v87h162V0H30" id="am"/><path d="M194 0L95-120 63-95V0H30v-248h33v124l119-124h40L117-140 236 0h-42" id="an"/><path d="M185-189c-5-48-123-54-124 2 14 75 158 14 163 119 3 78-121 87-175 55-17-10-28-26-33-46l33-7c5 56 141 63 141-1 0-78-155-14-162-118-5-82 145-84 179-34 5 7 8 16 11 25" id="ao"/><g id="b"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#am"/><use transform="matrix(0.05,0,0,0.05,12,0)" xlink:href="#an"/><use transform="matrix(0.05,0,0,0.05,24,0)" xlink:href="#ao"/></g><path fill="#dd3522" d="M212-179c-10-28-35-45-73-45-59 0-87 40-87 99 0 60 29 101 89 101 43 0 62-24 78-52l27 14C228-24 195 4 139 4 59 4 22-46 18-125c-6-104 99-153 187-111 19 9 31 26 39 46" id="ap"/><path fill="#dd3522" d="M24 0v-261h32V0H24" id="aq"/><path fill="#dd3522" d="M84 4C-5 8 30-112 23-190h32v120c0 31 7 50 39 49 72-2 45-101 50-169h31l1 190h-30c-1-10 1-25-2-33-11 22-28 36-60 37" id="ar"/><path fill="#dd3522" d="M135-143c-3-34-86-38-87 0 15 53 115 12 119 90S17 21 10-45l28-5c4 36 97 45 98 0-10-56-113-15-118-90-4-57 82-63 122-42 12 7 21 19 24 35" id="as"/><path fill="#dd3522" d="M59-47c-2 24 18 29 38 22v24C64 9 27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="at"/><path fill="#dd3522" d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 
15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="au"/><path fill="#dd3522" d="M114-163C36-179 61-72 57 0H25l-1-190h30c1 12-1 29 2 39 6-27 23-49 58-41v29" id="av"/><g id="c"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#ap"/><use transform="matrix(0.05,0,0,0.05,12.950000000000001,0)" xlink:href="#aq"/><use transform="matrix(0.05,0,0,0.05,16.900000000000002,0)" xlink:href="#ar"/><use transform="matrix(0.05,0,0,0.05,26.900000000000002,0)" xlink:href="#as"/><use transform="matrix(0.05,0,0,0.05,35.900000000000006,0)" xlink:href="#at"/><use transform="matrix(0.05,0,0,0.05,40.900000000000006,0)" xlink:href="#au"/><use transform="matrix(0.05,0,0,0.05,50.900000000000006,0)" xlink:href="#av"/></g><path fill="#dd3522" d="M185-189c-5-48-123-54-124 2 14 75 158 14 163 119 3 78-121 87-175 55-17-10-28-26-33-46l33-7c5 56 141 63 141-1 0-78-155-14-162-118-5-82 145-84 179-34 5 7 8 16 11 25" id="aw"/><path fill="#dd3522" d="M96-169c-40 0-48 33-48 73s9 75 48 75c24 0 41-14 43-38l32 2c-6 37-31 61-74 61-59 0-76-41-82-99-10-93 101-131 147-64 4 7 5 14 7 22l-32 3c-4-21-16-35-41-35" id="ax"/><path fill="#dd3522" d="M24-231v-30h32v30H24zM24 0v-190h32V0H24" id="ay"/><path fill="#dd3522" d="M179-190L93 31C79 59 56 82 12 73V49c39 6 53-20 64-50L1-190h34L92-34l54-156h33" id="az"/><g id="d"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#aw"/><use transform="matrix(0.05,0,0,0.05,12,0)" xlink:href="#au"/><use transform="matrix(0.05,0,0,0.05,22,0)" xlink:href="#ax"/><use transform="matrix(0.05,0,0,0.05,31,0)" xlink:href="#ar"/><use transform="matrix(0.05,0,0,0.05,41,0)" xlink:href="#av"/><use transform="matrix(0.05,0,0,0.05,46.95,0)" xlink:href="#ay"/><use transform="matrix(0.05,0,0,0.05,50.900000000000006,0)" xlink:href="#at"/><use transform="matrix(0.05,0,0,0.05,55.900000000000006,0)" xlink:href="#az"/></g><path fill="#dd3522" d="M143 4C61 4 22-44 18-125c-5-107 100-154 193-111 17 8 29 25 37 43l-32 9c-13-25-37-40-76-40-61 0-88 39-88 99 0 61 29 100 91 101 35 0 62-11 79-27v-45h-74v-28h105v86C228-13 192 4 143 4" id="aA"/><path fill="#dd3522" d="M100-194c62-1 85 37 85 99 1 63-27 99-86 99S16-35 15-95c0-66 28-99 85-99zM99-20c44 1 53-31 53-75 0-43-8-75-51-75s-53 32-53 75 10 74 51 75" id="aB"/><path fill="#dd3522" d="M115-194c55 1 70 41 70 98S169 2 115 4C84 4 66-9 55-30l1 105H24l-1-265h31l2 30c10-21 28-34 59-34zm-8 174c40 0 45-34 45-75s-6-73-45-74c-42 0-51 32-51 76 0 43 10 73 51 73" id="aC"/><g id="e"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#aA"/><use transform="matrix(0.05,0,0,0.05,14,0)" xlink:href="#av"/><use transform="matrix(0.05,0,0,0.05,19.95,0)" xlink:href="#aB"/><use transform="matrix(0.05,0,0,0.05,29.950000000000003,0)" xlink:href="#ar"/><use transform="matrix(0.05,0,0,0.05,39.95,0)" xlink:href="#aC"/></g><path fill="#dd3522" d="M190 0L58-211 59 0H30v-248h39L202-35l-2-213h31V0h-41" id="aD"/><path fill="#dd3522" d="M85-194c31 0 48 13 60 33l-1-100h32l1 261h-30c-2-10 0-23-3-31C134-8 116 4 85 4 32 4 16-35 15-94c0-66 23-100 70-100zm9 24c-40 0-46 34-46 75 0 40 6 74 45 74 42 0 51-32 51-76 0-42-9-74-50-73" id="aE"/><g id="f"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#aD"/><use transform="matrix(0.05,0,0,0.05,12.950000000000001,0)" xlink:href="#aB"/><use transform="matrix(0.05,0,0,0.05,22.950000000000003,0)" xlink:href="#aE"/><use transform="matrix(0.05,0,0,0.05,32.95,0)" xlink:href="#au"/></g><path fill="#dd3522" d="M80-196l47-18 7 23-49 13 32 44-20 13-27-46-27 45-21-12 33-44-49-13 8-23 47 19-2-53h23" id="aF"/><g id="g"><use transform="matrix(0.05,0,0,0.05,0,0)" 
xlink:href="#aF"/><use transform="matrix(0.05,0,0,0.05,7,0)" xlink:href="#ap"/><use transform="matrix(0.05,0,0,0.05,19.950000000000003,0)" xlink:href="#aq"/><use transform="matrix(0.05,0,0,0.05,23.900000000000002,0)" xlink:href="#ar"/><use transform="matrix(0.05,0,0,0.05,33.900000000000006,0)" xlink:href="#as"/><use transform="matrix(0.05,0,0,0.05,42.900000000000006,0)" xlink:href="#at"/><use transform="matrix(0.05,0,0,0.05,47.900000000000006,0)" xlink:href="#au"/><use transform="matrix(0.05,0,0,0.05,57.900000000000006,0)" xlink:href="#av"/></g><path fill="#dd3522" d="M30-248c87 1 191-15 191 75 0 78-77 80-158 76V0H30v-248zm33 125c57 0 124 11 124-50 0-59-68-47-124-48v98" id="aG"/><path fill="#dd3522" d="M210-169c-67 3-38 105-44 169h-31v-121c0-29-5-50-35-48C34-165 62-65 56 0H25l-1-190h30c1 10-1 24 2 32 10-44 99-50 107 0 11-21 27-35 58-36 85-2 47 119 55 194h-31v-121c0-29-5-49-35-48" id="aH"/><path fill="#dd3522" d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="aI"/><g id="h"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#aG"/><use transform="matrix(0.05,0,0,0.05,12,0)" xlink:href="#av"/><use transform="matrix(0.05,0,0,0.05,17.95,0)" xlink:href="#ay"/><use transform="matrix(0.05,0,0,0.05,21.9,0)" xlink:href="#aH"/><use transform="matrix(0.05,0,0,0.05,36.85,0)" xlink:href="#aI"/><use transform="matrix(0.05,0,0,0.05,46.85,0)" xlink:href="#av"/><use transform="matrix(0.05,0,0,0.05,52.800000000000004,0)" xlink:href="#az"/></g><path fill="#333" d="M169-182c-1-43-94-46-97-3 18 66 151 10 154 114 3 95-165 93-204 36-6-8-10-19-12-30l50-8c3 46 112 56 116 5-17-69-150-10-154-114-4-87 153-88 188-35 5 8 8 18 10 28" id="aJ"/><path fill="#333" d="M185-48c-13 30-37 53-82 52C43 2 14-33 14-96s30-98 90-98c62 0 83 45 84 108H66c0 31 8 55 39 56 18 0 30-7 34-22zm-45-69c5-46-57-63-70-21-2 6-4 13-4 21h74" id="aK"/><path fill="#333" d="M25 0v-261h50V0H25" id="aL"/><path fill="#333" d="M121-226c-27-7-43 5-38 36h38v33H83V0H34v-157H6v-33h28c-9-59 32-81 87-68v32" id="aM"/><path fill="#333" d="M35-132v-50h50v50H35zM35 0v-49h50V0H35" id="aN"/><g id="i"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aJ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,11.851851851851853,0)" xlink:href="#aK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,21.7283950617284,0)" xlink:href="#aL"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,26.666666666666668,0)" xlink:href="#aM"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,32.543209876543216,0)" xlink:href="#aN"/></g><path fill="#333" d="M136-208V0H84v-208H4v-40h212v40h-80" id="aO"/><path fill="#333" d="M67-125c0 53 21 87 73 88 37 1 54-22 65-47l45 17C233-25 199 4 140 4 58 4 20-42 15-125 8-235 124-281 211-232c18 10 29 29 36 50l-46 12c-8-25-30-41-62-41-52 0-71 34-72 86" id="aP"/><path fill="#333" d="M24-248c93 1 206-16 204 79-1 75-69 88-152 82V0H24v-248zm52 121c47 0 100 7 100-41 0-47-54-39-100-39v80" id="aQ"/><g id="j"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/></g><path fill="#333" d="M168-19C124 19 11 11 16-68c3-43 29-60 
59-75-25-44-12-111 53-106 37 3 64 15 64 52 0 43-40 52-69 68 12 22 27 41 43 59 14-19 21-38 28-64l37 13c-8 29-19 52-34 74 12 10 32 16 52 10v35c-28 11-65-2-81-17zm-58-139c19-9 43-14 43-39 0-14-10-22-25-22-34 0-33 41-18 61zM60-68c-3 40 55 47 79 25-18-20-34-42-48-67-18 8-30 20-31 42" id="aR"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aR" id="k"/><path fill="#333" d="M238-95c0 69-44 99-111 99C63 4 22-25 22-93v-155h51v151c-1 38 19 59 55 60 90 1 49-130 58-211h52v153" id="aS"/><path fill="#333" d="M24-248c120-7 223 5 221 122C244-46 201 0 124 0H24v-248zM76-40c74 7 117-18 117-86 0-67-45-88-117-82v168" id="aT"/><path fill="#333" d="M4 7l51-268h42L46 7H4" id="aU"/><path fill="#333" d="M139-81c0-46-55-55-73-27H18l9-140h149v37H72l-4 63c44-38 133-4 122 66C201 21 21 35 11-62l49-4c5 18 15 30 39 30 26 0 40-18 40-45" id="aV"/><path fill="#333" d="M128-127c34 4 56 21 59 58 7 91-148 94-172 28-4-9-6-17-7-26l51-5c1 24 16 35 40 36 23 0 39-12 38-36-1-31-31-36-65-34v-40c32 2 59-3 59-33 0-20-13-33-34-33s-33 13-35 32l-50-3c6-44 37-68 86-68 50 0 83 20 83 66 0 35-22 52-53 58" id="aW"/><g id="l"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aS"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,12.790123456790125,0)" xlink:href="#aT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,25.58024691358025,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,37.4320987654321,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,42.370370370370374,0)" xlink:href="#aV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,52.24691358024692,0)" xlink:href="#aW"/></g><path fill="#333" d="M23 0v-37h61v-169l-59 37v-38l62-41h46v211h57V0H23" id="aX"/><path fill="#333" d="M101-251c68 0 84 54 84 127C185-50 166 4 99 4S15-52 14-124c-1-75 17-127 87-127zm-1 216c37-5 36-46 36-89s4-89-36-89c-39 0-36 45-36 89 0 43-3 85 36 89" id="aY"/><path fill="#333" d="M182-182c0 78-84 86-111 141h115V0H12c-6-101 99-100 120-180 1-22-12-31-33-32-23 0-32 14-35 34l-49-3c5-45 32-70 84-70 51 0 83 22 83 69" id="aZ"/><path fill="#333" d="M14-72v-43h91v43H14" id="ba"/><path fill="#333" d="M115-159c48 0 72 30 72 78 0 54-30 85-83 85-64 0-91-50-91-122 0-98 58-163 141-120 15 8 21 24 27 44l-47 6c-5-31-48-31-61-4-7 14-11 33-11 60 9-17 28-27 53-27zM102-35c24 0 36-20 36-45s-11-43-37-43c-23 0-36 14-36 38 0 27 11 50 37 50" id="bb"/><g id="m"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,35.45679012345679,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.39506172839506,0)" xlink:href="#aX"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.27160493827161,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,60.14814814814815,0)" xlink:href="#aZ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,70.0246913580247,0)" xlink:href="#aV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,79.90123456790124,0)" xlink:href="#ba"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,85.77777777777779,0)" 
xlink:href="#bb"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,95.65432098765433,0)" xlink:href="#aV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,105.53086419753087,0)" xlink:href="#aV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,115.40740740740742,0)" xlink:href="#aW"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,125.28395061728396,0)" xlink:href="#aV"/></g><path fill="#333" d="M20 75v-336h95v34H67V41h48v34H20" id="bc"/><path fill="#333" d="M140-251c80 0 125 45 125 126S219 4 139 4C58 4 15-44 15-125s44-126 125-126zm-1 214c52 0 73-35 73-88 0-50-21-86-72-86-52 0-73 35-73 86s22 88 72 88" id="bd"/><path fill="#333" d="M135-194c53 0 70 44 70 98 0 56-19 98-73 100-31 1-45-17-59-34 3 33 2 69 2 105H25l-1-265h48c2 10 0 23 3 31 11-24 29-35 60-35zM114-30c33 0 39-31 40-66 0-38-9-64-40-64-56 0-55 130 0 130" id="be"/><path fill="#333" d="M115-3C79 11 28 4 28-45v-112H4v-33h27l15-45h31v45h36v33H77v99c-1 23 16 31 38 25v30" id="bf"/><path fill="#333" d="M4 75V41h49v-268H4v-34h96V75H4" id="bg"/><g id="n"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#bc"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,5.8765432098765435,0)" xlink:href="#bd"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,19.703703703703706,0)" xlink:href="#be"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,30.518518518518523,0)" xlink:href="#bf"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,36.39506172839506,0)" xlink:href="#bg"/></g><path fill="#333" d="M127-220V0H93v-220H8v-28h204v28h-85" id="bh"/><path fill="#333" d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="bi"/><path fill="#333" d="M177-190C167-65 218 103 67 71c-23-6-38-20-44-43l32-5c15 47 100 32 89-28v-30C133-14 115 1 83 1 29 1 15-40 15-95c0-56 16-97 71-98 29-1 48 16 59 35 1-10 0-23 2-32h30zM94-22c36 0 50-32 50-73 0-42-14-75-50-75-39 0-46 34-46 75s6 73 46 73" id="bj"/><g id="o"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bh"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,11.049382716049381,0)" xlink:href="#bi"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,23.39506172839506,0)" xlink:href="#bj"/></g><path fill="#333" d="M16-82v-28h88v28H16" id="bk"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bk" id="p"/><path fill="#333" d="M39-94c74 12-11 154 75 146v23c-44 4-70-10-70-52C44-23 55-84 6-82v-22c81 4-7-162 84-157h24v23c-82-15-2 131-75 144" id="bl"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bl" id="q"/><path d="M143 0L79-87 56-68V0H24v-261h32v163l83-92h37l-77 82L181 0h-38" id="bm"/><path d="M84 4C-5 8 30-112 23-190h32v120c0 31 7 50 39 49 72-2 45-101 50-169h31l1 190h-30c-1-10 1-25-2-33-11 22-28 36-60 37" id="bn"/><path d="M115-194c53 0 69 39 70 98 0 66-23 100-70 100C84 3 66-7 56-30L54 0H23l1-261h32v101c10-23 28-34 59-34zm-8 174c40 0 45-34 45-75 0-40-5-75-45-74-42 0-51 32-51 76 0 43 10 73 51 73" id="bo"/><path d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="bp"/><path d="M114-163C36-179 61-72 57 
0H25l-1-190h30c1 12-1 29 2 39 6-27 23-49 58-41v29" id="bq"/><path d="M117-194c89-4 53 116 60 194h-32v-121c0-31-8-49-39-48C34-167 62-67 57 0H25l-1-190h30c1 10-1 24 2 32 11-22 29-35 61-36" id="br"/><path d="M59-47c-2 24 18 29 38 22v24C64 9 27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="bs"/><path d="M135-143c-3-34-86-38-87 0 15 53 115 12 119 90S17 21 10-45l28-5c4 36 97 45 98 0-10-56-113-15-118-90-4-57 82-63 122-42 12 7 21 19 24 35" id="bt"/><path d="M33 0v-38h34V0H33" id="bu"/><path d="M24-231v-30h32v30H24zM24 0v-190h32V0H24" id="bv"/><path d="M100-194c62-1 85 37 85 99 1 63-27 99-86 99S16-35 15-95c0-66 28-99 85-99zM99-20c44 1 53-31 53-75 0-43-8-75-51-75s-53 32-53 75 10 74 51 75" id="bw"/><path d="M0 4l72-265h28L28 4H0" id="bx"/><path d="M96-169c-40 0-48 33-48 73s9 75 48 75c24 0 41-14 43-38l32 2c-6 37-31 61-74 61-59 0-76-41-82-99-10-93 101-131 147-64 4 7 5 14 7 22l-32 3c-4-21-16-35-41-35" id="by"/><path d="M24 0v-261h32V0H24" id="bz"/><path d="M18-100v-36l175-74v27L42-118l151 64v27" id="bA"/><path d="M16-82v-28h88v28H16" id="bB"/><path d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="bC"/><path d="M210-169c-67 3-38 105-44 169h-31v-121c0-29-5-50-35-48C34-165 62-65 56 0H25l-1-190h30c1 10-1 24 2 32 10-44 99-50 107 0 11-21 27-35 58-36 85-2 47 119 55 194h-31v-121c0-29-5-49-35-48" id="bD"/><path d="M18-27v-27l151-64-151-65v-27l175 74v36" id="bE"/><g id="r"><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,0,0)" xlink:href="#bm"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,8.666666666666668,0)" xlink:href="#bn"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,18.296296296296298,0)" xlink:href="#bo"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,27.92592592592593,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,37.555555555555564,0)" xlink:href="#bq"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,43.28518518518519,0)" xlink:href="#br"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,52.914814814814825,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,62.54444444444445,0)" xlink:href="#bs"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,67.35925925925928,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,76.98888888888891,0)" xlink:href="#bt"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,85.65555555555558,0)" xlink:href="#bu"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,90.47037037037039,0)" xlink:href="#bv"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,94.2740740740741,0)" xlink:href="#bw"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,103.90370370370371,0)" xlink:href="#bx"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,108.71851851851854,0)" xlink:href="#by"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,117.38518518518521,0)" xlink:href="#bz"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,121.18888888888891,0)" xlink:href="#bn"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,130.81851851851854,0)" xlink:href="#bt"/><use 
transform="matrix(0.048148148148148155,0,0,0.048148148148148155,139.4851851851852,0)" xlink:href="#bs"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,144.3,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,153.92962962962963,0)" xlink:href="#bq"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,159.65925925925927,0)" xlink:href="#bx"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,164.47407407407408,0)" xlink:href="#bA"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,174.5851851851852,0)" xlink:href="#by"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,183.25185185185185,0)" xlink:href="#bz"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,187.05555555555557,0)" xlink:href="#bn"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,196.6851851851852,0)" xlink:href="#bt"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,205.35185185185185,0)" xlink:href="#bs"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,210.16666666666666,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,219.79629629629628,0)" xlink:href="#bq"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,225.52592592592592,0)" xlink:href="#bB"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,231.25555555555553,0)" xlink:href="#br"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,240.88518518518518,0)" xlink:href="#bC"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,250.5148148148148,0)" xlink:href="#bD"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,264.9111111111111,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,274.5407407407407,0)" xlink:href="#bE"/><path fill="#8080ff" d="M-.87 1.2h286.4v1.27H-.88z"/></g><path d="M33-154v-36h34v36H33zM33 0v-36h34V0H33" id="bF"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,0,0)" xlink:href="#bF" id="s"/><path d="M206 0h-36l-40-164L89 0H53L-1-190h32L70-26l43-164h34l41 164 42-164h31" id="bG"/><path d="M85-194c31 0 48 13 60 33l-1-100h32l1 261h-30c-2-10 0-23-3-31C134-8 116 4 85 4 32 4 16-35 15-94c0-66 23-100 70-100zm9 24c-40 0-46 34-46 75 0 40 6 74 45 74 42 0 51-32 51-76 0-42-9-74-50-73" id="bH"/><g id="t"><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,0,0)" xlink:href="#bw"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,9.629629629629632,0)" xlink:href="#bG"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,22.1,0)" xlink:href="#br"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,31.729629629629635,0)" xlink:href="#bp"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,41.35925925925927,0)" xlink:href="#bH"/></g><path d="M76-40C78 24 84 88 6 75V52C86 64 9-79 80-94c-40-6-34-59-34-106 1-29-11-41-40-38v-23c44-4 70 10 70 52 0 47-12 108 38 105v22c-26 1-39 14-38 42" id="bI"/><use transform="matrix(0.048148148148148155,0,0,0.048148148148148155,0,0)" xlink:href="#bI" id="u"/><path fill="#333" d="M199 0l-22-63H83L61 0H9l90-248h61L250 0h-51zm-33-102l-36-108c-10 38-24 72-36 108h72" id="bJ"/><g id="v"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#bJ"/><use 
transform="matrix(0.04938271604938272,0,0,0.04938271604938272,12.790123456790125,0)" xlink:href="#aL"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,17.7283950617284,0)" xlink:href="#aL"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,22.66666666666667,0)" xlink:href="#aN"/></g><path fill="#333" d="M24 0v-54h51V0H24" id="bK"/><g id="w"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,9.876543209876544,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,14.814814814814817,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,24.69135802469136,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,29.629629629629633,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,39.50617283950618,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,44.44444444444445,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,54.320987654320994,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,59.25925925925927,0)" xlink:href="#aY"/></g><g id="x"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#bc"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,5.8765432098765435,0)" xlink:href="#bd"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,19.703703703703706,0)" xlink:href="#be"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,30.518518518518523,0)" xlink:href="#bf"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,36.39506172839506,0)" xlink:href="#bg"/></g><path fill="#333" d="M30-248c87 1 191-15 191 75 0 78-77 80-158 76V0H30v-248zm33 125c57 0 124 11 124-50 0-59-68-47-124-48v98" id="bL"/><path fill="#333" d="M84 4C-5 8 30-112 23-190h32v120c0 31 7 50 39 49 72-2 45-101 50-169h31l1 190h-30c-1-10 1-25-2-33-11 22-28 36-60 37" id="bM"/><path fill="#333" d="M115-194c53 0 69 39 70 98 0 66-23 100-70 100C84 3 66-7 56-30L54 0H23l1-261h32v101c10-23 28-34 59-34zm-8 174c40 0 45-34 45-75 0-40-5-75-45-74-42 0-51 32-51 76 0 43 10 73 51 73" id="bN"/><path fill="#333" d="M24 0v-261h32V0H24" id="bO"/><path fill="#333" d="M24-231v-30h32v30H24zM24 0v-190h32V0H24" id="bP"/><path fill="#333" d="M96-169c-40 0-48 33-48 73s9 75 48 75c24 0 41-14 43-38l32 2c-6 37-31 61-74 61-59 0-76-41-82-99-10-93 101-131 147-64 4 7 5 14 7 22l-32 3c-4-21-16-35-41-35" id="bQ"/><g id="y"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bL"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,14.814814814814813,0)" xlink:href="#bM"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,27.160493827160494,0)" xlink:href="#bN"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,39.50617283950617,0)" xlink:href="#bO"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.382716049382715,0)" xlink:href="#bP"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,49.25925925925926,0)" xlink:href="#bQ"/></g><path fill="#333" d="M33 0v-248h34V0H33" id="bR"/><path fill="#333" d="M117-194c89-4 53 116 60 194h-32v-121c0-31-8-49-39-48C34-167 62-67 57 0H25l-1-190h30c1 10-1 24 2 32 11-22 29-35 61-36" id="bS"/><path fill="#333" d="M59-47c-2 24 18 29 38 22v24C64 9 
27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="bT"/><path fill="#333" d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="bU"/><path fill="#333" d="M114-163C36-179 61-72 57 0H25l-1-190h30c1 12-1 29 2 39 6-27 23-49 58-41v29" id="bV"/><g id="z"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bR"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,6.172839506172839,0)" xlink:href="#bS"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,18.51851851851852,0)" xlink:href="#bT"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,24.691358024691358,0)" xlink:href="#bU"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,37.03703703703704,0)" xlink:href="#bV"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.382716049382715,0)" xlink:href="#bS"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,56.72839506172839,0)" xlink:href="#bU"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,69.07407407407408,0)" xlink:href="#bT"/></g><path d="M80-196l47-18 7 23-49 13 32 44-20 13-27-46-27 45-21-12 33-44-49-13 8-23 47 19-2-53h23" id="bW"/><path d="M212-179c-10-28-35-45-73-45-59 0-87 40-87 99 0 60 29 101 89 101 43 0 62-24 78-52l27 14C228-24 195 4 139 4 59 4 22-46 18-125c-6-104 99-153 187-111 19 9 31 26 39 46" id="bX"/><g id="A"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bW"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,7.777777777777777,0)" xlink:href="#bX"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,22.166666666666664,0)" xlink:href="#bz"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,26.555555555555554,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,37.666666666666664,0)" xlink:href="#bt"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,47.666666666666664,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,53.22222222222222,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,64.33333333333333,0)" xlink:href="#bq"/></g><path d="M30-248c87 1 191-15 191 75 0 78-77 80-158 76V0H30v-248zm33 125c57 0 124 11 124-50 0-59-68-47-124-48v98" id="bY"/><path d="M179-190L93 31C79 59 56 82 12 73V49c39 6 53-20 64-50L1-190h34L92-34l54-156h33" id="bZ"/><g id="B"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bY"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,19.94444444444444,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,24.33333333333333,0)" xlink:href="#bD"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,40.944444444444436,0)" xlink:href="#bC"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.05555555555554,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,58.66666666666666,0)" xlink:href="#bZ"/></g><g id="C"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#ao"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" 
xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,24.44444444444444,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,34.444444444444436,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,45.55555555555554,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.16666666666665,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,56.555555555555536,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,62.111111111111086,0)" xlink:href="#bZ"/></g><path d="M143 4C61 4 22-44 18-125c-5-107 100-154 193-111 17 8 29 25 37 43l-32 9c-13-25-37-40-76-40-61 0-88 39-88 99 0 61 29 100 91 101 35 0 62-11 79-27v-45h-74v-28h105v86C228-13 192 4 143 4" id="ca"/><path d="M115-194c55 1 70 41 70 98S169 2 115 4C84 4 66-9 55-30l1 105H24l-1-265h31l2 30c10-21 28-34 59-34zm-8 174c40 0 45-34 45-75s-6-73-45-74c-42 0-51 32-51 76 0 43 10 73 51 73" id="cb"/><g id="D"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#ca"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,15.555555555555554,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,22.166666666666664,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,33.27777777777777,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,44.38888888888888,0)" xlink:href="#cb"/></g><g id="E"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,4.388888888888888,0)" xlink:href="#bt"/></g><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bC" id="F"/><g id="G"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bt"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.111111111111107,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,31.111111111111107,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,42.222222222222214,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,48.83333333333332,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,53.22222222222221,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,58.777777777777764,0)" xlink:href="#bZ"/></g><path d="M177-190C167-65 218 103 67 71c-23-6-38-20-44-43l32-5c15 47 100 32 89-28v-30C133-14 115 1 83 1 29 1 15-40 15-95c0-56 16-97 71-98 29-1 48 16 59 35 1-10 0-23 2-32h30zM94-22c36 0 50-32 50-73 0-42-14-75-50-75-39 0-46 34-46 75s6 73 46 73" id="cc"/><g id="H"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#cc"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,17.722222222222218,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,28.83333333333333,0)" xlink:href="#bn"/><use 
transform="matrix(0.055555555555555546,0,0,0.055555555555555546,39.944444444444436,0)" xlink:href="#cb"/></g><g id="I"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bC"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,22.222222222222218,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.77777777777777,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.88888888888888,0)" xlink:href="#bD"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,55.499999999999986,0)" xlink:href="#bC"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,66.6111111111111,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,72.16666666666666,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,76.55555555555554,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,86.55555555555554,0)" xlink:href="#bC"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,97.66666666666666,0)" xlink:href="#bz"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,102.05555555555554,0)" xlink:href="#bz"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,106.44444444444443,0)" xlink:href="#bZ"/></g><g id="J"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.611111111111107,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.722222222222214,0)" xlink:href="#bC"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.83333333333332,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,44.38888888888888,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,55.499999999999986,0)" xlink:href="#bH"/></g><path d="M101-234c-31-9-42 10-38 44h38v23H63V0H32v-167H5v-23h27c-7-52 17-82 69-68v24" id="cd"/><g id="K"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#cd"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.666666666666664,0)" xlink:href="#bq"/></g><path d="M106-169C34-169 62-67 57 0H25v-261h32l-1 103c12-21 28-36 61-36 89 0 53 116 60 194h-32v-121c2-32-8-49-39-48" id="ce"/><g id="L"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#ce"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.666666666666664,0)" xlink:href="#bp"/></g><g id="M"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#bz"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,14.388888888888886,0)" xlink:href="#bn"/><use 
transform="matrix(0.055555555555555546,0,0,0.055555555555555546,25.499999999999993,0)" xlink:href="#bt"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,35.49999999999999,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,41.05555555555555,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.16666666666666,0)" xlink:href="#bq"/></g><g id="N"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bo"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#bZ"/></g><g id="O"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#am"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" xlink:href="#an"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,26.66666666666666,0)" xlink:href="#ao"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,39.99999999999999,0)" xlink:href="#bu"/></g><path d="M127-220V0H93v-220H8v-28h204v28h-85" id="cf"/><g id="P"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#cf"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,12.166666666666664,0)" xlink:href="#ce"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,23.27777777777777,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.66666666666666,0)" xlink:href="#bt"/></g><g id="Q"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bt"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#bp"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.111111111111107,0)" xlink:href="#by"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,31.111111111111107,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,42.222222222222214,0)" xlink:href="#bq"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,48.83333333333332,0)" xlink:href="#bv"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,53.22222222222221,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,58.777777777777764,0)" xlink:href="#bZ"/></g><path d="M33 0v-248h34V0H33" id="cg"/><path d="M30-248c118-7 216 8 213 122C240-48 200 0 122 0H30v-248zM63-27c89 8 146-16 146-99s-60-101-146-95v194" id="ch"/><g id="R"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#cg"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#ch"/></g><g id="S"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bt"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#ce"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.111111111111107,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,32.222222222222214,0)" xlink:href="#bG"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,46.6111111111111,0)" xlink:href="#br"/></g><g id="T"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bv"/><use 
transform="matrix(0.055555555555555546,0,0,0.055555555555555546,4.388888888888888,0)" xlink:href="#br"/></g><g id="U"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bD"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.611111111111107,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.722222222222214,0)" xlink:href="#bH"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.83333333333332,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,49.94444444444443,0)" xlink:href="#bz"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,54.333333333333314,0)" xlink:href="#bp"/></g><g id="V"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#bw"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,22.222222222222218,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.77777777777777,0)" xlink:href="#cb"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.88888888888888,0)" xlink:href="#bn"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,49.999999999999986,0)" xlink:href="#bs"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,55.55555555555554,0)" xlink:href="#bF"/></g><path d="M172-543c0-365 197-575 553-575 119 0 231 20 336 59l-62 158c-100-38-189-57-268-57-247 0-371 138-371 413 0 271 120 406 361 406 105 0 212-21 321-62v160C953 0 843 20 713 20c-347 1-541-210-541-563" id="ci"/><path d="M532-1413l-268-21v-122h451v1413l352 20V0H188v-123l344-20v-1270" id="cj"/><path d="M578-131c239 0 309-137 309-397v-570h182V0H922l-27-147h-10C815-36 704 20 551 20c-261 0-391-134-391-401v-717h182v707c0 173 79 260 236 260" id="ck"/><path d="M1026-301c0 225-197 321-448 321-165 0-290-23-375-69v-166c129 57 252 86 370 86 183 0 275-54 275-162 0-38-16-71-49-99s-108-63-226-106c-156-57-253-112-293-160s-61-105-61-171c2-338 524-340 789-217l-60 149c-121-48-228-72-319-72-157 0-236 44-236 133 0 41 17 74 51 98s111 57 232 101c141 51 234 101 280 151s70 111 70 183" id="cl"/><path d="M1020-150v138C933 9 849 20 768 20 527 20 406-95 406-324v-637H139v-94l267-49 77-287h105v293h438v137H588v637c0 130 64 195 192 195 62 0 142-7 240-21" id="cm"/><path d="M631-1118c283 0 470 204 465 491v113H322c5 250 120 375 344 375 130 0 253-25 370-76v160C925-5 804 20 672 20c-329 0-539-224-539-561 0-331 182-577 498-577zM326-662h573c0-203-91-305-272-305-183 0-284 102-301 305" id="cn"/><path d="M442-897c93-139 173-221 377-221 79 0 159 15 240 45l-49 166c-82-30-157-45-224-45-221 0-340 134-340 360V0H264v-1098h148l22 201h8" id="co"/><path d="M1243 324H-16V184h1259v140" id="cp"/><path d="M1092-551c0 322-136 571-430 571-138 0-245-53-322-159h-12c24 220 7 402 12 631H158v-1590h147l27 148h8c73-112 181-168 322-168 294 0 430 246 430 567zM629-967c-241 0-288 156-289 416 0 151 24 259 70 323s120 97 221 97c181 0 272-141 272-422 0-276-91-414-274-414" id="cq"/><path d="M739-1436c0 71-43 115-106 115-71 0-107-38-107-115 0-76 36-114 107-114s106 38 106 114zM541-954l-269-21v-123h451v955l352 20V0H197v-123l344-20v-811" id="cr"/><path d="M977 0v-707c0-97-9-165-27-203s-47-57-88-57c-58 0-100 28-126 83s-40 148-40 278V0H535v-707c0-173-42-260-125-260-56 0-96 26-120 79s-36 160-36 319V0H92v-1098h127l27 148h10c45-112 112-168 201-168 109 0 180 61 213 
182h6c51-121 124-182 219-182 85 0 147 30 186 92s58 165 58 309V0H977" id="cs"/><path d="M596-1118c288 2 430 92 430 366V0H895l-37-152h-8C752-34 680 17 483 20 272 23 135-98 135-307c0-221 170-337 510-348l203-7c15-197-55-305-244-305-98 0-207 27-328 82l-63-137c131-64 258-96 383-96zm-72 991c194 0 325-116 322-311v-99c-251 15-522-6-522 232 0 119 67 178 200 178" id="ct"/><path d="M82-1098h188l262 654c55 137 84 233 89 290h6c15-75 45-172 90-292l239-652h189L670 143c-45 116-99 201-156 261-87 92-259 104-414 71V330c92 17 226 18 286-29 67-53 115-195 153-291" id="cu"/><path d="M188-739c-1-248 156-381 410-379 57 0 107 7 150 20h378v113l-196 27c43 57 65 128 65 213 2 253-212 385-489 346-67 37-100 82-100 133 0 56 54 84 161 84h187c216-2 358 93 358 297 0 251-188 377-565 377-145 0-255-26-331-80S102 283 102 186c0-141 80-231 240-270-64-31-96-83-96-154 0-75 44-139 133-192-109-41-191-166-191-309zm406 221c149 0 223-77 223-230 0-159-75-239-225-239s-225 81-225 242c0 151 76 227 227 227zm346 649c0-146-78-149-248-149H504c-153 0-230 66-230 198 0 116 90 174 271 174 263 0 395-74 395-223" id="cv"/><path d="M1112-551c0 331-186 571-502 571-301 0-495-251-495-571 0-330 184-567 501-567 302 0 496 249 496 567zM614-131c207 0 310-140 310-420 0-277-104-416-312-416-206 0-309 139-309 416 0 280 104 420 311 420" id="cw"/><path d="M137-547c0-322 136-571 430-571 137 0 245 53 322 160h12c-24-215-7-376-12-598h182V0H924l-27-147h-8C814-36 706 20 567 20c-294 0-430-246-430-567zm463 416c241 0 288-157 289-416 0-151-23-259-69-323s-121-97-222-97c-181 0-272 141-272 422 0 276 91 414 274 414" id="cx"/><g id="W"><use transform="matrix(0.0078125,0,0,0.0078125,0,0)" xlink:href="#ci"/><use transform="matrix(0.0078125,0,0,0.0078125,9.6015625,0)" xlink:href="#cj"/><use transform="matrix(0.0078125,0,0,0.0078125,19.203125,0)" xlink:href="#ck"/><use transform="matrix(0.0078125,0,0,0.0078125,28.8046875,0)" xlink:href="#cl"/><use transform="matrix(0.0078125,0,0,0.0078125,38.40625,0)" xlink:href="#cm"/><use transform="matrix(0.0078125,0,0,0.0078125,48.0078125,0)" xlink:href="#cn"/><use transform="matrix(0.0078125,0,0,0.0078125,57.609375,0)" xlink:href="#co"/><use transform="matrix(0.0078125,0,0,0.0078125,67.2109375,0)" xlink:href="#cp"/><use transform="matrix(0.0078125,0,0,0.0078125,76.8125,0)" xlink:href="#cq"/><use transform="matrix(0.0078125,0,0,0.0078125,86.4140625,0)" xlink:href="#co"/><use transform="matrix(0.0078125,0,0,0.0078125,96.015625,0)" xlink:href="#cr"/><use transform="matrix(0.0078125,0,0,0.0078125,105.6171875,0)" xlink:href="#cs"/><use transform="matrix(0.0078125,0,0,0.0078125,115.21875,0)" xlink:href="#ct"/><use transform="matrix(0.0078125,0,0,0.0078125,124.8203125,0)" xlink:href="#co"/><use transform="matrix(0.0078125,0,0,0.0078125,134.421875,0)" xlink:href="#cu"/><use transform="matrix(0.0078125,0,0,0.0078125,144.0234375,0)" xlink:href="#cp"/><use transform="matrix(0.0078125,0,0,0.0078125,153.625,0)" xlink:href="#cl"/><use transform="matrix(0.0078125,0,0,0.0078125,163.2265625,0)" xlink:href="#cn"/><use transform="matrix(0.0078125,0,0,0.0078125,172.828125,0)" xlink:href="#ci"/><use transform="matrix(0.0078125,0,0,0.0078125,182.4296875,0)" xlink:href="#ck"/><use transform="matrix(0.0078125,0,0,0.0078125,192.03125,0)" xlink:href="#co"/><use transform="matrix(0.0078125,0,0,0.0078125,201.6328125,0)" xlink:href="#cr"/><use transform="matrix(0.0078125,0,0,0.0078125,211.234375,0)" xlink:href="#cm"/><use transform="matrix(0.0078125,0,0,0.0078125,220.8359375,0)" xlink:href="#cu"/><use transform="matrix(0.0078125,0,0,0.0078125,230.4375,0)" 
xlink:href="#cp"/><use transform="matrix(0.0078125,0,0,0.0078125,240.0390625,0)" xlink:href="#cv"/><use transform="matrix(0.0078125,0,0,0.0078125,249.640625,0)" xlink:href="#co"/><use transform="matrix(0.0078125,0,0,0.0078125,259.2421875,0)" xlink:href="#cw"/><use transform="matrix(0.0078125,0,0,0.0078125,268.84375,0)" xlink:href="#ck"/><use transform="matrix(0.0078125,0,0,0.0078125,278.4453125,0)" xlink:href="#cq"/><use transform="matrix(0.0078125,0,0,0.0078125,288.046875,0)" xlink:href="#cp"/><use transform="matrix(0.0078125,0,0,0.0078125,297.6484375,0)" xlink:href="#cr"/><use transform="matrix(0.0078125,0,0,0.0078125,307.25,0)" xlink:href="#cx"/></g><path fill="#008a0e" d="M30-248c118-7 216 8 213 122C240-48 200 0 122 0H30v-248zM63-27c89 8 146-16 146-99s-60-101-146-95v194" id="cy"/><path fill="#008a0e" d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="cz"/><path fill="#008a0e" d="M59-47c-2 24 18 29 38 22v24C64 9 27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="cA"/><g id="X"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#cy"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,15.987654320987653,0)" xlink:href="#cz"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,28.333333333333332,0)" xlink:href="#cA"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,34.50617283950617,0)" xlink:href="#cz"/></g><path fill="#008a0e" d="M30-248c87 1 191-15 191 75 0 78-77 80-158 76V0H30v-248zm33 125c57 0 124 11 124-50 0-59-68-47-124-48v98" id="cB"/><path fill="#008a0e" d="M24 0v-261h32V0H24" id="cC"/><path fill="#008a0e" d="M117-194c89-4 53 116 60 194h-32v-121c0-31-8-49-39-48C34-167 62-67 57 0H25l-1-190h30c1 10-1 24 2 32 11-22 29-35 61-36" id="cD"/><path fill="#008a0e" d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="cE"/><g id="Y"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#cB"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,14.814814814814813,0)" xlink:href="#cC"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,19.691358024691354,0)" xlink:href="#cz"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,32.03703703703703,0)" xlink:href="#cD"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.38271604938271,0)" xlink:href="#cE"/></g><path fill="#1071e5" d="M212-179c-10-28-35-45-73-45-59 0-87 40-87 99 0 60 29 101 89 101 43 0 62-24 78-52l27 14C228-24 195 4 139 4 59 4 22-46 18-125c-6-104 99-153 187-111 19 9 31 26 39 46" id="cF"/><path fill="#1071e5" d="M100-194c62-1 85 37 85 99 1 63-27 99-86 99S16-35 15-95c0-66 28-99 85-99zM99-20c44 1 53-31 53-75 0-43-8-75-51-75s-53 32-53 75 10 74 51 75" id="cG"/><path fill="#1071e5" d="M117-194c89-4 53 116 60 194h-32v-121c0-31-8-49-39-48C34-167 62-67 57 0H25l-1-190h30c1 10-1 24 2 32 11-22 29-35 61-36" id="cH"/><path fill="#1071e5" d="M59-47c-2 24 18 29 38 22v24C64 9 27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="cI"/><path fill="#1071e5" d="M114-163C36-179 61-72 57 0H25l-1-190h30c1 12-1 29 2 39 6-27 23-49 58-41v29" id="cJ"/><path fill="#1071e5" d="M24 0v-261h32V0H24" id="cK"/><g id="Z"><use 
transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#cF"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,15.987654320987653,0)" xlink:href="#cG"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,28.333333333333332,0)" xlink:href="#cH"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,40.67901234567901,0)" xlink:href="#cI"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,46.851851851851855,0)" xlink:href="#cJ"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,54.19753086419753,0)" xlink:href="#cG"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,66.54320987654322,0)" xlink:href="#cK"/></g><path fill="#1071e5" d="M30-248c87 1 191-15 191 75 0 78-77 80-158 76V0H30v-248zm33 125c57 0 124 11 124-50 0-59-68-47-124-48v98" id="cL"/><path fill="#1071e5" d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="cM"/><path fill="#1071e5" d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="cN"/><g id="aa"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#cL"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,14.814814814814813,0)" xlink:href="#cK"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,19.691358024691354,0)" xlink:href="#cM"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,32.03703703703703,0)" xlink:href="#cH"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.38271604938271,0)" xlink:href="#cN"/></g><g id="ab"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,35.45679012345679,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.39506172839506,0)" xlink:href="#aX"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.27160493827161,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,60.14814814814815,0)" xlink:href="#aZ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,70.0246913580247,0)" xlink:href="#aV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,79.90123456790124,0)" xlink:href="#aY"/></g><path fill="#333" d="M67-93c0 74 22 123 53 168H70C40 30 18-18 18-93s22-123 52-168h50c-32 44-53 94-53 168" id="cO"/><path fill="#333" d="M195 0l-88-114-31 24V0H24v-248h52v113l112-113h60L142-143 257 0h-62" id="cP"/><path fill="#333" d="M85 4C-2 5 27-109 22-190h50c7 57-23 150 33 157 60-5 35-97 40-157h50l1 190h-47c-2-12 1-28-3-38-12 25-28 42-61 42" id="cQ"/><path fill="#333" d="M135-194c52 0 70 43 70 98 0 56-19 99-73 100-30 1-46-15-58-35L72 0H24l1-261h50v104c11-23 29-37 60-37zM114-30c31 0 40-27 40-66 0-37-7-63-39-63s-41 28-41 65c0 36 8 64 40 64" id="cR"/><path fill="#333" d="M102-93c0 74-22 123-52 168H0C30 29 54-18 53-93c0-74-22-123-53-168h50c30 45 52 94 52 168" id="cS"/><g id="ac"><use 
transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#cO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,5.8765432098765435,0)" xlink:href="#cP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,18.666666666666668,0)" xlink:href="#cQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,29.481481481481477,0)" xlink:href="#cR"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.2962962962963,0)" xlink:href="#aK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.17283950617284,0)" xlink:href="#aL"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,55.111111111111114,0)" xlink:href="#aK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,64.98765432098766,0)" xlink:href="#bf"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,70.8641975308642,0)" xlink:href="#cS"/></g><path fill="#333" d="M165-50V0h-47v-50H5v-38l105-160h55v161h33v37h-33zm-47-37l2-116L46-87h72" id="cT"/><g id="ad"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,35.45679012345679,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.39506172839506,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.27160493827161,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,60.14814814814815,0)" xlink:href="#aW"/></g><g id="ae"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,9.876543209876544,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,14.814814814814817,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,24.69135802469136,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,29.629629629629633,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,39.50617283950618,0)" xlink:href="#bK"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,44.44444444444445,0)" xlink:href="#aY"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,54.320987654320994,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,59.25925925925927,0)" xlink:href="#aY"/></g><g id="af"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bL"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,14.814814814814813,0)" xlink:href="#bM"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,27.160493827160494,0)" xlink:href="#bN"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,39.50617283950617,0)" xlink:href="#bO"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.382716049382715,0)" xlink:href="#bP"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,49.25925925925926,0)" xlink:href="#bQ"/></g><g id="ag"><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,0,0)" xlink:href="#bR"/><use 
transform="matrix(0.06172839506172839,0,0,0.06172839506172839,6.172839506172839,0)" xlink:href="#bS"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,18.51851851851852,0)" xlink:href="#bT"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,24.691358024691358,0)" xlink:href="#bU"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,37.03703703703704,0)" xlink:href="#bV"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,44.382716049382715,0)" xlink:href="#bS"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,56.72839506172839,0)" xlink:href="#bU"/><use transform="matrix(0.06172839506172839,0,0,0.06172839506172839,69.07407407407408,0)" xlink:href="#bT"/></g><path fill="#333" d="M138-131c27 9 52 24 51 61 0 53-36 74-89 74S11-19 11-69c0-35 22-54 51-61-78-25-46-121 38-121 51 0 83 19 83 66 0 30-18 49-45 54zm-38-16c24 0 32-13 32-36 1-23-11-34-32-34-22 0-33 12-32 34 0 22 9 36 32 36zm1 116c27 0 37-17 37-43 0-25-13-39-39-39-24 0-37 15-37 40 0 27 11 42 39 42" id="cU"/><g id="ah"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,35.45679012345679,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.39506172839506,0)" xlink:href="#cU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.27160493827161,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,60.14814814814815,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,70.0246913580247,0)" xlink:href="#aW"/></g><path fill="#333" d="M99-251c69 0 84 53 88 123 5 99-61 162-144 118-15-8-21-25-26-45l46-6c4 31 50 33 63 7 7-15 12-36 12-60-9 18-29 28-54 28-48 0-72-32-72-82 0-55 31-83 87-83zm-1 128c24 0 37-16 37-39 0-27-10-51-37-51-25 0-35 19-35 45 0 25 10 45 35 45" id="cV"/><g id="ai"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#aO"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,10.814814814814815,0)" xlink:href="#aP"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,23.60493827160494,0)" xlink:href="#aQ"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,35.45679012345679,0)" xlink:href="#aU"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,40.39506172839506,0)" xlink:href="#cV"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,50.27160493827161,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,60.14814814814815,0)" xlink:href="#cT"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,70.0246913580247,0)" xlink:href="#aW"/></g><g id="aj"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#bc"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,5.8765432098765435,0)" xlink:href="#bd"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,19.703703703703706,0)" xlink:href="#be"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,30.518518518518523,0)" xlink:href="#bf"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,36.39506172839506,0)" 
xlink:href="#bg"/></g><path d="M190 0L58-211 59 0H30v-248h39L202-35l-2-213h31V0h-41" id="cW"/><g id="ak"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#cW"/><use transform="matrix(0.05,0,0,0.05,12.950000000000001,0)" xlink:href="#bw"/><use transform="matrix(0.05,0,0,0.05,22.950000000000003,0)" xlink:href="#bH"/><use transform="matrix(0.05,0,0,0.05,32.95,0)" xlink:href="#bp"/></g><g id="al"><use transform="matrix(0.05,0,0,0.05,0,0)" xlink:href="#ca"/><use transform="matrix(0.05,0,0,0.05,14,0)" xlink:href="#bq"/><use transform="matrix(0.05,0,0,0.05,19.95,0)" xlink:href="#bw"/><use transform="matrix(0.05,0,0,0.05,29.950000000000003,0)" xlink:href="#bn"/><use transform="matrix(0.05,0,0,0.05,39.95,0)" xlink:href="#cb"/></g></defs></g></svg>
diff --git a/.github/images/user_data.svg b/.github/images/user_data.svg
new file mode 100644
index 0000000000..a36b8051e4
--- /dev/null
+++ b/.github/images/user_data.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:lucid="lucid" width="639.59" height="980"><g transform="translate(-580 0)" lucid:page-tab-id="WugxnWre9wdv"><path d="M0 0h1760v1360H0z" fill="#fff"/><path d="M1054.86 123.1c2.84-1.72 7.44-1.72 10.28 0l89.72 53.8c2.84 1.72 2.84 4.48 0 6.2l-89.72 53.8c-2.84 1.72-7.44 1.72-10.28 0l-89.72-53.8c-2.84-1.72-2.84-4.48 0-6.2z" stroke="#5e5e5e" stroke-width="3" fill="#fff"/><use xlink:href="#a" transform="matrix(1,0,0,1,965,125) translate(41.727222222222224 60.6875)"/><path d="M1056.6 373.1c2.83-1.72 7.44-1.72 10.28 0l89.7 53.8c2.85 1.72 2.85 4.48 0 6.2l-89.7 53.8c-2.84 1.72-7.45 1.72-10.3 0l-89.7-53.8c-2.84-1.72-2.84-4.48 0-6.2z" stroke="#5e5e5e" stroke-width="3" fill="#fff"/><use xlink:href="#b" transform="matrix(1,0,0,1,966.7324861257653,375) translate(26.412407407407425 50.23222222222222)"/><use xlink:href="#c" transform="matrix(1,0,0,1,966.7324861257653,375) translate(50.58524691358026 71.56555555555556)"/><path d="M694.86 373.1c2.84-1.72 7.44-1.72 10.28 0l89.72 53.8c2.84 1.72 2.84 4.48 0 6.2l-89.72 53.8c-2.84 1.72-7.44 1.72-10.28 0l-89.72-53.8c-2.84-1.72-2.84-4.48 0-6.2z" stroke="#5e5e5e" stroke-width="3" fill="#fff"/><use xlink:href="#b" transform="matrix(1,0,0,1,605,375) translate(26.412407407407425 50.23222222222222)"/><use xlink:href="#c" transform="matrix(1,0,0,1,605,375) translate(50.58524691358026 71.56555555555556)"/><path d="M1121.73 575c22.1 0 40 17.9 40 40s-17.9 40-40 40h-120c-22.1 0-40-17.9-40-40s17.9-40 40-40z" stroke="#5e5e5e" stroke-width="3" fill="#ffbbb1" fill-opacity=".3"/><use xlink:href="#d" transform="matrix(1,0,0,1,966.7324861257653,580) translate(15.560555555555561 26.9375)"/><use xlink:href="#e" transform="matrix(1,0,0,1,966.7324861257653,580) translate(112.22722222222222 26.9375)"/><use xlink:href="#f" transform="matrix(1,0,0,1,966.7324861257653,580) translate(134.44944444444445 26.9375)"/><use xlink:href="#g" transform="matrix(1,0,0,1,966.7324861257653,580) translate(26.421666666666674 50.9375)"/><use xlink:href="#h" transform="matrix(1,0,0,1,966.7324861257653,580) translate(108.53277777777778 50.9375)"/><path d="M1062.73 555h-2v-12.75h2m0-21.33h-2v-30.24h2" fill="#5e5e5e"/><path d="M1061.96 489.7l.77-.1v1.1h-2v-1.13M1061.73 570.26L1057.1 556h9.27z" fill="#5e5e5e"/><path d="M1061.73 573.5l-6-18.5h12m-9.26 2l3.26 10.03L1065 557" fill="#5e5e5e"/><use xlink:href="#i" transform="matrix(1,0,0,1,1049.9300169899627,520.9215463349155) translate(0 14.222222222222223)"/><path d="M760 575c22.1 0 40 17.9 40 40s-17.9 40-40 40H640c-22.1 0-40-17.9-40-40s17.9-40 40-40z" stroke="#5e5e5e" stroke-width="3" fill="#ffbbb1" fill-opacity=".3"/><use xlink:href="#d" transform="matrix(1,0,0,1,605,580) translate(15.560555555555561 26.9375)"/><use xlink:href="#e" transform="matrix(1,0,0,1,605,580) translate(112.22722222222222 26.9375)"/><use xlink:href="#f" transform="matrix(1,0,0,1,605,580) translate(134.44944444444445 26.9375)"/><use xlink:href="#g" transform="matrix(1,0,0,1,605,580) translate(14.588333333333347 50.9375)"/><use xlink:href="#j" transform="matrix(1,0,0,1,605,580) translate(96.69944444444445 50.9375)"/><path d="M701 555h-2v-12.75h2m0-21.33h-2v-30.24h2" fill="#5e5e5e"/><path d="M700.23 489.7l.77-.1v1.1h-2v-1.13M700 570.26L695.36 556h9.28z" fill="#5e5e5e"/><path d="M700 573.5l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><use xlink:href="#i" transform="matrix(1,0,0,1,688.1975308641976,520.9215463349155) 
translate(0 14.222222222222223)"/><path d="M820 706c0-3.3 2.7-6 6-6h68c3.3 0 6 2.7 6 6v248c0 3.3-2.7 6-6 6h-68c-3.3 0-6-2.7-6-6z" fill="none"/><path d="M820 960c5.52 0 10-4.48 10-10V840c0-5.52 4.48-10 10-10-5.52 0-10-4.48-10-10V710c0-5.52-4.48-10-10-10" stroke="#5e5e5e" stroke-width="3" fill="none"/><path d="M776 722.5c24.3 0 44 17.9 44 40s-19.7 40-44 40H644c-24.3 0-44-17.9-44-40s19.7-40 44-40z" stroke="#5e5e5e" stroke-width="3" fill="#ffbbb1" fill-opacity=".3"/><use xlink:href="#k" transform="matrix(1,0,0,1,605,727.5) translate(44.00500000000002 26.9375)"/><use xlink:href="#l" transform="matrix(1,0,0,1,605,727.5) translate(91.67166666666668 26.9375)"/><use xlink:href="#m" transform="matrix(1,0,0,1,605,727.5) translate(16.838333333333345 50.9375)"/><use xlink:href="#n" transform="matrix(1,0,0,1,605,727.5) translate(98.94944444444445 50.9375)"/><use xlink:href="#o" transform="matrix(1,0,0,1,605,727.5) translate(114.44944444444445 50.9375)"/><path d="M776 857.5c24.3 0 44 17.9 44 40s-19.7 40-44 40H644c-24.3 0-44-17.9-44-40s19.7-40 44-40z" stroke="#5e5e5e" stroke-width="3" fill="#ffbbb1" fill-opacity=".3"/><use xlink:href="#p" transform="matrix(1,0,0,1,605,862.5) translate(27.921666666666674 26.9375)"/><use xlink:href="#q" transform="matrix(1,0,0,1,605,862.5) translate(98.92166666666667 26.9375)"/><use xlink:href="#r" transform="matrix(1,0,0,1,605,862.5) translate(66.72722222222222 50.9375)"/><path d="M711 805c0 .55-.45 1-1 1s-1-.45-1-1 .45-1 1-1 1 .45 1 1zm0 4.06c0 .56-.45 1-1 1s-1-.44-1-1c0-.55.45-1 1-1s1 .45 1 1zm0 4.07c0 .55-.45 1-1 1s-1-.45-1-1c0-.56.45-1 1-1s1 .44 1 1zm0 4.06c0 .54-.45 1-1 1s-1-.46-1-1c0-.57.45-1 1-1s1 .43 1 1zm0 4.05c0 .55-.45 1-1 1s-1-.45-1-1 .45-1 1-1 1 .45 1 1zm0 4.06c0 .56-.45 1-1 1s-1-.44-1-1c0-.54.45-1 1-1s1 .46 1 1zm0 4.07c0 .56-.45 1-1 1s-1-.44-1-1c0-.55.45-1 1-1s1 .45 1 1zm0 4.07c0 .55-.45 1-1 1s-1-.45-1-1c0-.56.45-1 1-1s1 .44 1 1z" fill="#5e5e5e"/><path d="M711 805.03h-2V804h2" fill="#5e5e5e"/><path d="M710 852.76l-4.64-14.26h9.28z" stroke="#5e5e5e" stroke-width="2" fill="#fff"/><path d="M874.86 243.1c2.84-1.72 7.44-1.72 10.28 0l89.72 53.8c2.84 1.72 2.84 4.48 0 6.2l-89.72 53.8c-2.84 1.72-7.44 1.72-10.28 0l-89.72-53.8c-2.84-1.72-2.84-4.48 0-6.2z" stroke="#5e5e5e" stroke-width="3" fill="#fff"/><use xlink:href="#s" transform="matrix(1,0,0,1,785,245) translate(58.977222222222224 60.6875)"/><path d="M715.86 301h-9.73l-2.64.7-1.8 1.8-.7 2.63v45.7h-2v-45.96l.92-3.42 2.53-2.53 3.42-.92h10m64.86 2h-28.66v-2h28.66" fill="#5e5e5e"/><path d="M781.77 301h-1.06v-2h1M700 367.1l-4.64-14.28h9.28z" fill="#5e5e5e"/><path d="M700 370.32l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><use xlink:href="#t" transform="matrix(1,0,0,1,715.8634941153605,290.6666666666667) translate(0 12.444444444444446)"/><path d="M1062.73 351.82h-2v-42.5h2M979.27 301v-2h16.32v2" fill="#5e5e5e"/><path d="M979.3 301h-1l-.07-2h1.06M1061.73 367.1l-4.63-14.28h9.27z" fill="#5e5e5e"/><path d="M1061.73 370.32l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><use xlink:href="#u" transform="matrix(1,0,0,1,995.5903901430833,290.6666666666667) translate(0 12.444444444444446)"/><path d="M909.72 181h-23.6l-2.63.7-1.8 1.8-.7 2.63v35.7h-2v-35.96l.92-3.42 2.53-2.53 3.42-.92h23.85m51 2h-24.2v-2h24.2M961.77 181h-1.06v-2h1M880 237.1l-4.64-14.28h9.28z" fill="#5e5e5e"/><path d="M880 240.32l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><use xlink:href="#v" transform="matrix(1,0,0,1,909.7172282804474,170.66666666666666) translate(0 12.444444444444446)"/><path d="M860 806c0-3.3 2.7-6 6-6h108c3.3 
0 6 2.7 6 6v48c0 3.3-2.7 6-6 6H866c-3.3 0-6-2.7-6-6z" stroke="#5e5e5e" stroke-width="3" fill="#fff"/><use xlink:href="#w" transform="matrix(1,0,0,1,865,805) translate(12.25 28.5)"/><path d="M1199.27 714.13l-.93 3.42-2.53 2.53-3.4.92H926.13l-2.64.7-1.8 1.8-.7 2.63V780h-2v-54.13l.92-3.42 2.53-2.53 3.42-.92h266.26l2.64-.7 1.78-1.8.72-2.63V250h2m-3.46-70.08l2.54 2.53.93 3.42v45.47h-2v-45.2l-.72-2.65-1.78-1.8-2.64-.7h-32.86v-2h33.13" fill="#5e5e5e"/><path d="M1159.3 181h-1l-.07-2h1.06M920 795.26L915.36 781h9.28z" fill="#5e5e5e"/><path d="M920 798.5l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><g><use xlink:href="#x" transform="matrix(1,0,0,1,1187.9379726718073,231.33863837312123) translate(0 12.444444444444446)"/></g><path d="M921 780h-2V601.85h2m-3.45-171.93l2.53 2.53.92 3.42v147.3h-2V436.14l-.7-2.64-1.8-1.8-2.63-.7h-114.6v-2h114.86" fill="#5e5e5e"/><path d="M799.3 431h-1l-.07-2h1.06M920 795.26L915.36 781h9.28z" fill="#5e5e5e"/><path d="M920 798.5l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03M921 780h-2V601.85h2M962.47 431h-36.34l-2.64.7-1.8 1.8-.7 2.63v147.05h-2v-147.3l.92-3.43 2.53-2.53 3.42-.92h36.6" fill="#5e5e5e"/><path d="M963.5 431h-1.06v-2h1M920 795.26L915.36 781h9.28z" fill="#5e5e5e"/><path d="M920 798.5l-6-18.5h12m-9.26 2l3.26 10.03 3.26-10.03" fill="#5e5e5e"/><g><use xlink:href="#v" transform="matrix(1,0,0,1,906.604938271605,583.1829901866013) translate(0 12.444444444444446)"/></g><path d="M1090.87 50c0 16.57-13.82 30-30.87 30s-30.87-13.43-30.87-30 13.82-30 30.87-30 30.87 13.43 30.87 30z" stroke="#5e5e5e" stroke-width="3" fill="#c7e8ac" fill-opacity=".5"/><path d="M1060 82.5v19.32" stroke="#5e5e5e" stroke-width="2" fill="none"/><path d="M1059.98 81.5l1.02-.05v1.08h-2v-1.1" fill="#5e5e5e"/><path d="M1060 117.1l-4.64-14.28h9.28z" stroke="#5e5e5e" stroke-width="2" fill="#5e5e5e"/><defs><path fill="#333" d="M24-231v-30h32v30H24zM24 0v-190h32V0H24" id="y"/><path fill="#333" d="M135-143c-3-34-86-38-87 0 15 53 115 12 119 90S17 21 10-45l28-5c4 36 97 45 98 0-10-56-113-15-118-90-4-57 82-63 122-42 12 7 21 19 24 35" id="z"/><path fill="#333" d="M-5 72V49h209v23H-5" id="A"/><path fill="#333" d="M100-194c63 0 86 42 84 106H49c0 40 14 67 53 68 26 1 43-12 49-29l28 8c-11 28-37 45-77 45C44 4 14-33 15-96c1-61 26-98 85-98zm52 81c6-60-76-77-97-28-3 7-6 17-6 28h103" id="B"/><path fill="#333" d="M143 0L79-87 56-68V0H24v-261h32v163l83-92h37l-77 82L181 0h-38" id="C"/><path fill="#333" d="M210-169c-67 3-38 105-44 169h-31v-121c0-29-5-50-35-48C34-165 62-65 56 0H25l-1-190h30c1 10-1 24 2 32 10-44 99-50 107 0 11-21 27-35 58-36 85-2 47 119 55 194h-31v-121c0-29-5-49-35-48" id="D"/><path fill="#333" d="M117-194c89-4 53 116 60 194h-32v-121c0-31-8-49-39-48C34-167 62-67 57 0H25l-1-190h30c1 10-1 24 2 32 11-22 29-35 61-36" id="E"/><path fill="#333" d="M177-190C167-65 218 103 67 71c-23-6-38-20-44-43l32-5c15 47 100 32 89-28v-30C133-14 115 1 83 1 29 1 15-40 15-95c0-56 16-97 71-98 29-1 48 16 59 35 1-10 0-23 2-32h30zM94-22c36 0 50-32 50-73 0-42-14-75-50-75-39 0-46 34-46 75s6 73 46 73" id="F"/><g id="a"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,4.388888888888888,0)" xlink:href="#z"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,14.388888888888886,0)" xlink:href="#A"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,25.499999999999993,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,36.6111111111111,0)" 
xlink:href="#C"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,46.6111111111111,0)" xlink:href="#z"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,56.6111111111111,0)" xlink:href="#A"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,67.72222222222221,0)" xlink:href="#D"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,84.33333333333331,0)" xlink:href="#E"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,95.44444444444443,0)" xlink:href="#F"/></g><path fill="#333" d="M141-36C126-15 110 5 73 4 37 3 15-17 15-53c-1-64 63-63 125-63 3-35-9-54-41-54-24 1-41 7-42 31l-33-3c5-37 33-52 76-52 45 0 72 20 72 64v82c-1 20 7 32 28 27v20c-31 9-61-2-59-35zM48-53c0 20 12 33 32 33 41-3 63-29 60-74-43 2-92-5-92 41" id="G"/><path fill="#333" d="M115-194c53 0 69 39 70 98 0 66-23 100-70 100C84 3 66-7 56-30L54 0H23l1-261h32v101c10-23 28-34 59-34zm-8 174c40 0 45-34 45-75 0-40-5-75-45-74-42 0-51 32-51 76 0 43 10 73 51 73" id="H"/><path fill="#333" d="M24 0v-261h32V0H24" id="I"/><path fill="#333" d="M100-194c62-1 85 37 85 99 1 63-27 99-86 99S16-35 15-95c0-66 28-99 85-99zM99-20c44 1 53-31 53-75 0-43-8-75-51-75s-53 32-53 75 10 74 51 75" id="J"/><path fill="#333" d="M59-47c-2 24 18 29 38 22v24C64 9 27 4 27-40v-127H5v-23h24l9-43h21v43h35v23H59v120" id="K"/><path fill="#333" d="M114-163C36-179 61-72 57 0H25l-1-190h30c1 12-1 29 2 39 6-27 23-49 58-41v29" id="L"/><path fill="#333" d="M115-194c55 1 70 41 70 98S169 2 115 4C84 4 66-9 55-30l1 105H24l-1-265h31l2 30c10-21 28-34 59-34zm-8 174c40 0 45-34 45-75s-6-73-45-74c-42 0-51 32-51 76 0 43 10 73 51 73" id="M"/><g id="b"><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,0,0)" xlink:href="#B"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,9.87654320987654,0)" xlink:href="#E"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,19.75308641975308,0)" xlink:href="#G"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,29.629629629629623,0)" xlink:href="#H"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,39.50617283950616,0)" xlink:href="#I"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,43.4074074074074,0)" xlink:href="#B"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,53.28395061728394,0)" xlink:href="#A"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,63.16049382716048,0)" xlink:href="#H"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,73.03703703703702,0)" xlink:href="#J"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,82.91358024691357,0)" xlink:href="#J"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,92.79012345679011,0)" xlink:href="#K"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,97.72839506172838,0)" xlink:href="#z"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,106.61728395061726,0)" xlink:href="#K"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,111.55555555555553,0)" xlink:href="#L"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,117.43209876543207,0)" xlink:href="#G"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,127.30864197530862,0)" xlink:href="#M"/></g><path fill="#333" d="M84 4C-5 8 30-112 23-190h32v120c0 31 7 50 39 49 72-2 45-101 50-169h31l1 190h-30c-1-10 1-25-2-33-11 22-28 36-60 37" id="N"/><path fill="#333" 
d="M85-194c31 0 48 13 60 33l-1-100h32l1 261h-30c-2-10 0-23-3-31C134-8 116 4 85 4 32 4 16-35 15-94c0-66 23-100 70-100zm9 24c-40 0-46 34-46 75 0 40 6 74 45 74 42 0 51-32 51-76 0-42-9-74-50-73" id="O"/><g id="c"><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,0,0)" xlink:href="#A"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,9.87654320987654,0)" xlink:href="#N"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,19.75308641975308,0)" xlink:href="#z"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,28.64197530864197,0)" xlink:href="#B"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,38.518518518518505,0)" xlink:href="#L"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,44.39506172839505,0)" xlink:href="#A"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,54.271604938271594,0)" xlink:href="#O"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,64.14814814814814,0)" xlink:href="#G"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,74.02469135802468,0)" xlink:href="#K"/><use transform="matrix(0.049382716049382706,0,0,0.049382716049382706,78.96296296296295,0)" xlink:href="#G"/></g><path fill="#333" d="M205 0l-28-72H64L36 0H1l101-248h38L239 0h-34zm-38-99l-47-123c-12 45-31 82-46 123h93" id="P"/><g id="d"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#P"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,24.44444444444444,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,35.55555555555555,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,46.66666666666666,0)" xlink:href="#E"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,57.777777777777764,0)" xlink:href="#O"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,68.88888888888887,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,79.99999999999999,0)" xlink:href="#O"/></g><g id="e"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#J"/></g><path fill="#333" d="M30 0v-248h187v28H63v79h144v27H63v87h162V0H30" id="Q"/><path fill="#333" d="M194 0L95-120 63-95V0H30v-248h33v124l119-124h40L117-140 236 0h-42" id="R"/><path fill="#333" d="M185-189c-5-48-123-54-124 2 14 75 158 14 163 119 3 78-121 87-175 55-17-10-28-26-33-46l33-7c5 56 141 63 141-1 0-78-155-14-162-118-5-82 145-84 179-34 5 7 8 16 11 25" id="S"/><g id="f"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#Q"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" xlink:href="#R"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,26.66666666666666,0)" xlink:href="#S"/></g><path fill="#333" d="M108 0H70L1-190h34L89-25l56-165h34" id="T"/><g id="g"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#L"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,17.722222222222218,0)" xlink:href="#J"/><use 
transform="matrix(0.055555555555555546,0,0,0.055555555555555546,28.83333333333333,0)" xlink:href="#T"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.83333333333333,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,43.222222222222214,0)" xlink:href="#O"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,54.33333333333332,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,65.44444444444443,0)" xlink:href="#O"/></g><path fill="#333" d="M127-220V0H93v-220H8v-28h204v28h-85" id="U"/><path fill="#333" d="M140-251c81 0 123 46 123 126C263-46 219 4 140 4 59 4 17-45 17-125s42-126 123-126zm0 227c63 0 89-41 89-101s-29-99-89-99c-61 0-89 39-89 99S79-25 140-24" id="V"/><path fill="#333" d="M240 0l2-218c-23 76-54 145-80 218h-23L58-218 59 0H30v-248h44l77 211c21-75 51-140 76-211h43V0h-30" id="W"/><path fill="#333" d="M30 0v-248h33v221h125V0H30" id="X"/><g id="h"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#U"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.777777777777775,0)" xlink:href="#V"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.33333333333333,0)" xlink:href="#W"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,43.944444444444436,0)" xlink:href="#X"/></g><path fill="#333" d="M175 0L67-191c6 58 2 128 3 191H24v-248h59L193-55c-6-58-2-129-3-193h46V0h-61" id="Y"/><path fill="#333" d="M110-194c64 0 96 36 96 99 0 64-35 99-97 99-61 0-95-36-95-99 0-62 34-99 96-99zm-1 164c35 0 45-28 45-65 0-40-10-65-43-65-34 0-45 26-45 65 0 36 10 65 43 65" id="Z"/><g id="i"><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,0,0)" xlink:href="#Y"/><use transform="matrix(0.04938271604938272,0,0,0.04938271604938272,12.790123456790125,0)" xlink:href="#Z"/></g><path fill="#333" d="M212-179c-10-28-35-45-73-45-59 0-87 40-87 99 0 60 29 101 89 101 43 0 62-24 78-52l27 14C228-24 195 4 139 4 59 4 22-46 18-125c-6-104 99-153 187-111 19 9 31 26 39 46" id="aa"/><path fill="#333" d="M33 0v-248h34V0H33" id="ab"/><g id="j"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#aa"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,14.388888888888886,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,18.777777777777775,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,29.888888888888882,0)" xlink:href="#N"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,40.99999999999999,0)" xlink:href="#O"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.1111111111111,0)" xlink:href="#ab"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,57.66666666666666,0)" xlink:href="#E"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,68.77777777777777,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,73.16666666666666,0)" xlink:href="#K"/></g><path fill="#333" d="M232-93c-1 65-40 97-104 97C67 4 28-28 28-90v-158h33c8 89-33 224 67 224 102 0 64-133 71-224h33v155" id="ac"/><g id="k"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#ac"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,14.388888888888886,0)" xlink:href="#z"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,24.388888888888886,0)" 
xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,35.49999999999999,0)" xlink:href="#L"/></g><g id="l"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#z"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,9.999999999999998,0)" xlink:href="#N"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.111111111111107,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,32.222222222222214,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,43.33333333333332,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,47.72222222222221,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.11111111111109,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,63.2222222222222,0)" xlink:href="#O"/></g><g id="m"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.666666666666664,0)" xlink:href="#D"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,33.27777777777777,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,44.38888888888888,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,48.777777777777764,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,59.888888888888864,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,65.44444444444443,0)" xlink:href="#B"/></g><path fill="#333" d="M101-234c-31-9-42 10-38 44h38v23H63V0H32v-167H5v-23h27c-7-52 17-82 69-68v24" id="ad"/><g id="n"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,4.388888888888888,0)" xlink:href="#ad"/></g><g id="o"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#T"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.111111111111107,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,32.222222222222214,0)" xlink:href="#y"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,36.6111111111111,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,40.999999999999986,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,52.11111111111109,0)" xlink:href="#H"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,63.2222222222222,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,67.61111111111109,0)" xlink:href="#B"/></g><g id="p"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#W"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.611111111111107,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,27.722222222222214,0)" xlink:href="#O"/><use 
transform="matrix(0.055555555555555546,0,0,0.055555555555555546,38.83333333333332,0)" xlink:href="#N"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,49.94444444444443,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,54.333333333333314,0)" xlink:href="#B"/></g><path fill="#333" d="M26 75v-336h71v23H56V52h41v23H26" id="ae"/><path fill="#333" d="M3 75V52h41v-290H3v-23h71V75H3" id="af"/><g id="q"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#ae"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.666666666666664,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,21.055555555555554,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,32.166666666666664,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,37.72222222222222,0)" xlink:href="#ad"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,43.27777777777778,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,54.388888888888886,0)" xlink:href="#L"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,60.999999999999986,0)" xlink:href="#D"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,77.6111111111111,0)" xlink:href="#af"/></g><g id="r"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,5.5555555555555545,0)" xlink:href="#B"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,16.666666666666664,0)" xlink:href="#D"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,33.27777777777777,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,44.38888888888888,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,48.777777777777764,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,59.888888888888864,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,65.44444444444443,0)" xlink:href="#B"/></g><g id="s"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#M"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,11.111111111111109,0)" xlink:href="#I"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,15.499999999999996,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,26.611111111111107,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,32.166666666666664,0)" xlink:href="#ad"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,37.72222222222222,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,48.83333333333333,0)" xlink:href="#L"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,55.444444444444436,0)" xlink:href="#D"/></g><path fill="#333" d="M25 0v-261h50V0H25" id="ag"/><path fill="#333" d="M25-224v-37h50v37H25zM25 0v-190h50V0H25" id="ah"/><path fill="#333" d="M135-194c87-1 58 113 63 194h-50c-7-57 23-157-34-157-59 0-34 97-39 157H25l-1-190h47c2 12-1 28 3 38 12-26 28-41 61-42" id="ai"/><path 
fill="#333" d="M85 4C-2 5 27-109 22-190h50c7 57-23 150 33 157 60-5 35-97 40-157h50l1 190h-47c-2-12 1-28-3-38-12 25-28 42-61 42" id="aj"/><path fill="#333" d="M144 0l-44-69L55 0H2l70-98-66-92h53l41 62 40-62h54l-67 91 71 99h-54" id="ak"/><g id="t"><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,0,0)" xlink:href="#ag"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,4.320987654320988,0)" xlink:href="#ah"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,8.641975308641976,0)" xlink:href="#ai"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,18.10493827160494,0)" xlink:href="#aj"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,27.567901234567906,0)" xlink:href="#ak"/></g><path fill="#333" d="M135-194c52 0 70 43 70 98 0 56-19 99-73 100-30 1-46-15-58-35L72 0H24l1-261h50v104c11-23 29-37 60-37zM114-30c31 0 40-27 40-66 0-37-7-63-39-63s-41 28-41 65c0 36 8 64 40 64" id="al"/><path fill="#333" d="M115-3C79 11 28 4 28-45v-112H4v-33h27l15-45h31v45h36v33H77v99c-1 23 16 31 38 25v30" id="am"/><path fill="#333" d="M185-48c-13 30-37 53-82 52C43 2 14-33 14-96s30-98 90-98c62 0 83 45 84 108H66c0 31 8 55 39 56 18 0 30-7 34-22zm-45-69c5-46-57-63-70-21-2 6-4 13-4 21h74" id="an"/><path fill="#333" d="M135-150c-39-12-60 13-60 57V0H25l-1-190h47c2 13-1 29 3 40 6-28 27-53 61-41v41" id="ao"/><path fill="#333" d="M190-63c-7 42-38 67-86 67-59 0-84-38-90-98-12-110 154-137 174-36l-49 2c-2-19-15-32-35-32-30 0-35 28-38 64-6 74 65 87 74 30" id="ap"/><path fill="#333" d="M147 0L96-86 75-71V0H25v-261h50v150l67-79h53l-66 74L201 0h-54" id="aq"/><g id="u"><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,0,0)" xlink:href="#al"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,9.462962962962964,0)" xlink:href="#Z"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,18.925925925925927,0)" xlink:href="#am"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,24.067901234567906,0)" xlink:href="#am"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,29.209876543209873,0)" xlink:href="#ag"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,33.53086419753087,0)" xlink:href="#an"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,42.17283950617284,0)" xlink:href="#ao"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,48.22222222222223,0)" xlink:href="#Z"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,57.68518518518519,0)" xlink:href="#ap"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,66.32716049382717,0)" xlink:href="#aq"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,74.96913580246914,0)" xlink:href="#an"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,83.61111111111111,0)" xlink:href="#am"/></g><path fill="#333" d="M146-102V0H94v-102L6-248h54l60 105 60-105h54" id="ar"/><path fill="#333" d="M137-138c1-29-70-34-71-4 15 46 118 7 119 86 1 83-164 76-172 9l43-7c4 19 20 25 44 25 33 8 57-30 24-41C81-84 22-81 20-136c-2-80 154-74 161-7" id="as"/><g id="v"><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,0,0)" xlink:href="#ar"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,9.506172839506174,0)" xlink:href="#an"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,18.148148148148152,0)" xlink:href="#as"/></g><path fill="#333" d="M160-131c35 5 61 23 61 61C221 17 115-2 30 0v-248c76 3 177-17 
177 60 0 33-19 50-47 57zm-97-11c50-1 110 9 110-42 0-47-63-36-110-37v79zm0 115c55-2 124 14 124-45 0-56-70-42-124-44v89" id="at"/><g id="w"><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,0,0)" xlink:href="#at"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,13.33333333333333,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,24.44444444444444,0)" xlink:href="#J"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,35.55555555555555,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,41.11111111111111,0)" xlink:href="#z"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,51.11111111111111,0)" xlink:href="#K"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,56.666666666666664,0)" xlink:href="#L"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,63.277777777777764,0)" xlink:href="#G"/><use transform="matrix(0.055555555555555546,0,0,0.055555555555555546,74.38888888888889,0)" xlink:href="#M"/></g><g id="x"><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,0,0)" xlink:href="#Y"/><use transform="matrix(0.04320987654320988,0,0,0.04320987654320988,11.19135802469136,0)" xlink:href="#Z"/></g></defs></g></svg>
diff --git a/.github/semantic.yml b/.github/semantic.yml
deleted file mode 100644
index 376c06a6a8..0000000000
--- a/.github/semantic.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Always validate the PR title, and ignore the commits
-titleOnly: true
-
-# Always validate all commits, and ignore the PR title
-commitsOnly: false
-
-# Always validate the PR title AND all the commits
-titleAndCommits: false
-
-# Require at least one commit to be valid
-# this is only relevant when using commitsOnly: true or titleAndCommits: true,
-# which validate all commits by default
-anyCommit: false
-
-# By default types specified in commitizen/conventional-commit-types is used.
-# See: https://github.com/commitizen/conventional-commit-types/blob/v2.3.0/index.json
-# You can override the valid types
-types:
-  - feat
-  - fix
-  - improvement
-  - docs
-  - refactor
-  - test
-  - ci
-  - chore
-
-# Allow use of Merge commits (eg on github: "Merge branch 'master' into feature/ride-unicorns")
-# this is only relevant when using commitsOnly: true (or titleAndCommits: true)
-allowMergeCommits: false
diff --git a/.github/stale.yml b/.github/stale.yml
deleted file mode 100644
index 894f3a0efb..0000000000
--- a/.github/stale.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# Number of days of inactivity before an issue becomes stale
-daysUntilStale: 90
-
-# Number of days of inactivity before an stale issue is closed
-daysUntilClose: 30
-
-# Label to use when marking an issue as stale
-staleLabel: stale
-
-issues:
-  # Comment to post when marking an issue as stale.
-  markComment: >
-    This issue has been automatically marked as stale because it has not had
-    recent activity. It will be closed if no further activity occurs. Thank you
-    for your contributions.
-  # Comment to post when closing a stale issue.
-  closeComment: >
-    This issue has been automatically closed because it has not had recent
-    activity since being marked as stale.
-pulls:
-  # Comment to post when marking a PR as stale.
-  markComment: >
-    This PR has been automatically marked as stale because it has not had
-    recent activity. It will be closed if no further activity occurs. Thank you
-    for your contributions.
-
-    To track this PR (even if closed), please open a corresponding issue if one
-    does not already exist.
-  # Comment to post when closing a stale PR.
-  closeComment: >
-    This PR has been automatically closed because it has not had recent
-    activity since being marked as stale.
-
-    Please reopen when work resumes.
diff --git a/.github/workflows/changelog-check.yaml b/.github/workflows/changelog-check.yaml
deleted file mode 100644
index 0eee4fdbe0..0000000000
--- a/.github/workflows/changelog-check.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: CHANGELOG Checks
-on:
-  pull_request_target:
-    paths:
-      - CHANGELOG.md
-    branches-ignore:
-      - releases/**
-
-jobs:
-  comment:
-    name: Comment
-    runs-on: ubuntu-latest
-    steps:
-      - name: Find Existing PR Comment
-        id: prc
-        uses: peter-evans/find-comment@v1
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          comment-author: "github-actions[bot]"
-          body-includes: "The `CHANGELOG.md` file contents are handled by the maintainers during merge."
-      - name: PR Comment
-        if: ${{ steps.prc.outputs.comment-id == '' }}
-        uses: peter-evans/create-or-update-comment@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Thank you for your contribution!
-
-            The `CHANGELOG.md` file contents are handled by the maintainers during merge. This is to prevent pull request merge conflicts.
-            Please see the Contributing Guide for additional pull request review items.
-
-            Remove any changes to the `CHANGELOG.md` file and commit them in this pull request.
-      - name: Fail the check if changelog change
-        run: exit 1
diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml
new file mode 100644
index 0000000000..bd5f2df7cb
--- /dev/null
+++ b/.github/workflows/lock.yml
@@ -0,0 +1,21 @@
+name: 'Lock Threads'
+
+on:
+  schedule:
+    - cron: '50 1 * * *'
+
+jobs:
+  lock:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/lock-threads@v5
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          issue-comment: >
+            I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
+            If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
+          issue-inactive-days: '30'
+          pr-comment: >
+            I'm going to lock this pull request because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
+            If you have found a problem that seems related to this change, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
+          pr-inactive-days: '30'
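The lock job above relies on the repository's default `GITHUB_TOKEN`. As a hedged sketch only (not part of the workflow in this diff), if the default token were restricted to read-only at the repository or organization level, the job would need an explicit permissions block so `dessant/lock-threads` can comment on and lock closed threads:

```yaml
# Assumed hardening sketch, not present in .github/workflows/lock.yml above.
jobs:
  lock:
    runs-on: ubuntu-latest
    permissions:
      issues: write          # required to comment on and lock closed issues
      pull-requests: write   # required to comment on and lock closed pull requests
    steps:
      - uses: dessant/lock-threads@v5
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
```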
diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml
new file mode 100644
index 0000000000..1e50760ee7
--- /dev/null
+++ b/.github/workflows/pr-title.yml
@@ -0,0 +1,52 @@
+name: 'Validate PR title'
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - edited
+      - synchronize
+
+jobs:
+  main:
+    name: Validate PR title
+    runs-on: ubuntu-latest
+    steps:
+      # Please look up the latest version from
+      # https://github.com/amannn/action-semantic-pull-request/releases
+      - uses: amannn/action-semantic-pull-request@v5.5.3
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          # Configure which types are allowed.
+          # Default: https://github.com/commitizen/conventional-commit-types
+          types: |
+            fix
+            feat
+            docs
+            ci
+            chore
+          # Configure that a scope must always be provided.
+          requireScope: false
+          # Configure additional validation for the subject based on a regex.
+          # This example ensures the subject starts with an uppercase character.
+          subjectPattern: ^[A-Z].+$
+          # If `subjectPattern` is configured, you can use this property to override
+          # the default error message that is shown when the pattern doesn't match.
+          # The variables `subject` and `title` can be used within the message.
+          subjectPatternError: |
+            The subject "{subject}" found in the pull request title "{title}"
+            didn't match the configured pattern. Please ensure that the subject
+            starts with an uppercase character.
+          # For work-in-progress PRs you can typically use draft pull requests
+          # from GitHub. However, private repositories on the free plan don't have
+          # this option and therefore this action allows you to opt-in to using the
+          # special "[WIP]" prefix to indicate this state. This will avoid the
+          # validation of the PR title and the pull request checks remain pending.
+          # Note that a second check will be reported if this is enabled.
+          wip: true
+          # When using "Squash and merge" on a PR with only one commit, GitHub
+          # will suggest using that commit message instead of the PR title for the
+          # merge commit, and it's easy to commit this by mistake. Enable this option
+          # to also validate the commit message for one commit PRs.
+          validateSingleCommit: false
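For illustration, here are hypothetical PR titles evaluated against the configuration above (the restricted type list, `requireScope: false`, `subjectPattern: ^[A-Z].+$`, and the `[WIP]` escape hatch enabled by `wip: true`). None of these titles appear in the workflow itself:

```yaml
# Hypothetical titles and the expected outcome under the settings above.
would_pass:
  - "feat: Add support for zonal shift"              # allowed type, subject starts with an uppercase character
  - "fix(karpenter): Correct controller IAM policy"  # a scope is permitted but not required (requireScope: false)
  - "[WIP] docs: Draft upgrade guide"                # wip: true skips validation and reports a pending check instead
would_fail:
  - "feat: add support for zonal shift"              # subject does not match ^[A-Z].+$
  - "refactor: Simplify node group locals"           # type is not in the allowed list (fix, feat, docs, ci, chore)
```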
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index ab9aef239c..a19ff831f9 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -2,98 +2,98 @@ name: Pre-Commit
 
 on:
   pull_request:
-  push:
     branches:
+      - main
       - master
 
+env:
+  TERRAFORM_DOCS_VERSION: v0.19.0
+  TFLINT_VERSION: v0.53.0
+
 jobs:
-  # Min Terraform version(s)
-  getDirectories:
-    name: Get root directories
+  collectInputs:
+    name: Collect workflow inputs
     runs-on: ubuntu-latest
+    outputs:
+      directories: ${{ steps.dirs.outputs.directories }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
-      - name: Install Python
-        uses: actions/setup-python@v2
-      - name: Build matrix
-        id: matrix
-        run: |
-          DIRS=$(python -c "import json; import glob; print(json.dumps([x.replace('/versions.tf', '') for x in glob.glob('./**/versions.tf', recursive=True)]))")
-          echo "::set-output name=directories::$DIRS"
-    outputs:
-      directories: ${{ steps.matrix.outputs.directories }}
+        uses: actions/checkout@v4
+
+      - name: Get root directories
+        id: dirs
+        uses: clowdhaus/terraform-composite-actions/directories@v1.9.0
 
   preCommitMinVersions:
-    name: Min TF validate
-    needs: getDirectories
+    name: Min TF pre-commit
+    needs: collectInputs
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        directory: ${{ fromJson(needs.getDirectories.outputs.directories) }}
+        directory: ${{ fromJson(needs.collectInputs.outputs.directories) }}
     steps:
+      # https://github.com/orgs/community/discussions/25678#discussioncomment-5242449
+      - name: Delete huge unnecessary tools folder
+        run: |
+          rm -rf /opt/hostedtoolcache/CodeQL
+          rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk
+          rm -rf /opt/hostedtoolcache/Ruby
+          rm -rf /opt/hostedtoolcache/go
+
       - name: Checkout
-        uses: actions/checkout@v2
-      - name: Install Python
-        uses: actions/setup-python@v2
+        uses: actions/checkout@v4
+
       - name: Terraform min/max versions
         id: minMax
-        uses: clowdhaus/terraform-min-max@v1.0.2
+        uses: clowdhaus/terraform-min-max@v1.3.1
         with:
           directory: ${{ matrix.directory }}
-      - name: Install Terraform v${{ steps.minMax.outputs.minVersion }}
-        uses: hashicorp/setup-terraform@v1
-        with:
-          terraform_version: ${{ steps.minMax.outputs.minVersion }}
-      - name: Install pre-commit dependencies
-        run: pip install pre-commit
-      - name: Execute pre-commit
+
+      - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }}
         # Run only validate pre-commit check on min version supported
         if: ${{ matrix.directory !=  '.' }}
-        run: pre-commit run terraform_validate --color=always --show-diff-on-failure --files ${{ matrix.directory }}/*
-      - name: Execute pre-commit
+        uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1
+        with:
+          terraform-version: ${{ steps.minMax.outputs.minVersion }}
+          tflint-version: ${{ env.TFLINT_VERSION }}
+          args: 'terraform_validate --color=always --show-diff-on-failure --files ${{ matrix.directory }}/*'
+
+      - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }}
         # Run only validate pre-commit check on min version supported
         if: ${{ matrix.directory ==  '.' }}
-        run: pre-commit run terraform_validate --color=always --show-diff-on-failure --files $(ls *.tf)
+        uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1
+        with:
+          terraform-version: ${{ steps.minMax.outputs.minVersion }}
+          tflint-version: ${{ env.TFLINT_VERSION }}
+          args: 'terraform_validate --color=always --show-diff-on-failure --files $(ls *.tf)'
 
-  # Max Terraform version
-  getBaseVersion:
-    name: Module max TF version
+  preCommitMaxVersion:
+    name: Max TF pre-commit
     runs-on: ubuntu-latest
+    needs: collectInputs
     steps:
+      # https://github.com/orgs/community/discussions/25678#discussioncomment-5242449
+      - name: Delete huge unnecessary tools folder
+        run: |
+          rm -rf /opt/hostedtoolcache/CodeQL
+          rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk
+          rm -rf /opt/hostedtoolcache/Ruby
+          rm -rf /opt/hostedtoolcache/go
+
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.pull_request.head.ref }}
+          repository: ${{github.event.pull_request.head.repo.full_name}}
+
       - name: Terraform min/max versions
         id: minMax
-        uses: clowdhaus/terraform-min-max@v1.0.2
-    outputs:
-      minVersion: ${{ steps.minMax.outputs.minVersion }}
-      maxVersion: ${{ steps.minMax.outputs.maxVersion }}
+        uses: clowdhaus/terraform-min-max@v1.3.1
 
-  preCommitMaxVersion:
-    name: Max TF pre-commit
-    runs-on: ubuntu-latest
-    needs: getBaseVersion
-    strategy:
-      fail-fast: false
-      matrix:
-        version:
-          - ${{ needs.getBaseVersion.outputs.maxVersion }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Install Python
-        uses: actions/setup-python@v2
-      - name: Install Terraform v${{ matrix.version }}
-        uses: hashicorp/setup-terraform@v1
+      - name: Pre-commit Terraform ${{ steps.minMax.outputs.maxVersion }}
+        uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1
         with:
-          terraform_version: ${{ matrix.version }}
-      - name: Install pre-commit dependencies
-        run: |
-          pip install pre-commit
-          curl -Lo ./terraform-docs.tar.gz https://github.com/terraform-docs/terraform-docs/releases/download/v0.13.0/terraform-docs-v0.13.0-$(uname)-amd64.tar.gz && tar -xzf terraform-docs.tar.gz && chmod +x terraform-docs && sudo mv terraform-docs /usr/bin/
-          curl -L "$(curl -s https://api.github.com/repos/terraform-linters/tflint/releases/latest | grep -o -E "https://.+?_linux_amd64.zip")" > tflint.zip && unzip tflint.zip && rm tflint.zip && sudo mv tflint /usr/bin/
-      - name: Execute pre-commit
-        # Run all pre-commit checks on max version supported
-        if: ${{ matrix.version ==  needs.getBaseVersion.outputs.maxVersion }}
-        run: pre-commit run --color=always --show-diff-on-failure --all-files
+          terraform-version: ${{ steps.minMax.outputs.maxVersion }}
+          tflint-version: ${{ env.TFLINT_VERSION }}
+          terraform-docs-version: ${{ env.TERRAFORM_DOCS_VERSION }}
+          install-hcledit: true
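The reworked workflow derives its job matrix from the `directories` output of `clowdhaus/terraform-composite-actions/directories` rather than the hand-rolled Python glob it replaces. As a purely illustrative sketch (the real list depends on where `versions.tf` files exist in the repository, so the directory names below are assumptions), a three-entry output would expand the `preCommitMinVersions` matrix like this:

```yaml
# Hypothetical expansion of `fromJson(needs.collectInputs.outputs.directories)`.
strategy:
  matrix:
    directory:
      - "."
      - "modules/eks-managed-node-group"
      - "modules/karpenter"
# One "Min TF pre-commit" job runs per directory, each resolving its own minimum
# Terraform version via clowdhaus/terraform-min-max before running terraform_validate.
```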
diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml
new file mode 100644
index 0000000000..918601d915
--- /dev/null
+++ b/.github/workflows/publish-docs.yml
@@ -0,0 +1,41 @@
+name: Publish docs via GitHub Pages
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+      - master
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Deploy docs
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: Checkout main
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install mkdocs-material==9.5.26 \
+            mkdocs-include-markdown-plugin==6.2.0 \
+            mkdocs-awesome-pages-plugin==2.9.2
+
+      - name: git config
+        run: |
+          git config --local user.email "action@github.com"
+          git config --local user.name "GitHub Action"
+
+      - run: mkdocs gh-deploy --force
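The docs workflow installs `mkdocs-material` plus the include-markdown and awesome-pages plugins and then publishes with `mkdocs gh-deploy`. A minimal `mkdocs.yml` that those packages would serve is sketched below; the file is not part of this diff, so the site name and configuration are assumptions:

```yaml
# Assumed minimal mkdocs.yml matching the packages installed in the workflow above.
site_name: terraform-aws-eks
theme:
  name: material       # provided by mkdocs-material
plugins:
  - include-markdown   # mkdocs-include-markdown-plugin
  - awesome-pages      # mkdocs-awesome-pages-plugin
```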
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..4a9422614e
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,37 @@
+name: Release
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+      - master
+    paths:
+      - '**/*.tpl'
+      - '**/*.py'
+      - '**/*.tf'
+      - '.github/workflows/release.yml'
+
+jobs:
+  release:
+    name: Release
+    runs-on: ubuntu-latest
+    # Skip running release workflow on forks
+    if: github.repository_owner == 'terraform-aws-modules'
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+          fetch-depth: 0
+
+      - name: Release
+        uses: cycjimmy/semantic-release-action@v4
+        with:
+          semantic_version: 23.0.2
+          extra_plugins: |
+            @semantic-release/changelog@6.0.3
+            @semantic-release/git@10.0.1
+            conventional-changelog-conventionalcommits@7.0.2
+        env:
+          GITHUB_TOKEN: ${{ secrets.SEMANTIC_RELEASE_TOKEN }}
diff --git a/.github/workflows/stale-actions.yaml b/.github/workflows/stale-actions.yaml
new file mode 100644
index 0000000000..6ccd0ed856
--- /dev/null
+++ b/.github/workflows/stale-actions.yaml
@@ -0,0 +1,32 @@
+name: 'Mark or close stale issues and PRs'
+on:
+  schedule:
+    - cron: '0 0 * * *'
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v9
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          # Marking issues and PRs as stale
+          days-before-stale: 30
+          stale-issue-label: stale
+          stale-pr-label: stale
+          stale-issue-message: |
+            This issue has been automatically marked as stale because it has been open 30 days
+            with no activity. Remove the stale label or add a comment, or this issue will be closed in 10 days
+          stale-pr-message: |
+            This PR has been automatically marked as stale because it has been open 30 days
+            with no activity. Remove the stale label or add a comment, or this PR will be closed in 10 days
+          # Issues and PRs are not marked stale if they carry one of these labels or belong to a milestone
+          exempt-issue-labels: bug,wip,on-hold
+          exempt-pr-labels: bug,wip,on-hold
+          exempt-all-milestones: true
+          # Closing stale issues and PRs
+          # The stale label is removed automatically when an issue or PR sees new activity.
+          days-before-close: 10
+          delete-branch: true
+          close-issue-message: This issue was automatically closed because it remained stale for 10 days
+          close-pr-message: This PR was automatically closed because it remained stale for 10 days
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6ef8a79bb9..b489ccee0d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,8 +1,32 @@
 repos:
-- repo: git://github.com/antonbabenko/pre-commit-terraform
-  rev: v1.48.0
-  hooks:
-    - id: terraform_fmt
-    - id: terraform_docs
-    - id: terraform_validate
-    - id: terraform_tflint
+  - repo: https://github.com/antonbabenko/pre-commit-terraform
+    rev: v1.99.5
+    hooks:
+      - id: terraform_fmt
+      - id: terraform_docs
+        args:
+          - '--args=--lockfile=false'
+      - id: terraform_tflint
+        args:
+          - '--args=--only=terraform_deprecated_interpolation'
+          - '--args=--only=terraform_deprecated_index'
+          - '--args=--only=terraform_unused_declarations'
+          - '--args=--only=terraform_comment_syntax'
+          - '--args=--only=terraform_documented_outputs'
+          - '--args=--only=terraform_documented_variables'
+          - '--args=--only=terraform_typed_variables'
+          - '--args=--only=terraform_module_pinned_source'
+          - '--args=--only=terraform_naming_convention'
+          - '--args=--only=terraform_required_version'
+          - '--args=--only=terraform_required_providers'
+          - '--args=--only=terraform_standard_module_structure'
+          - '--args=--only=terraform_workspace_remote'
+      - id: terraform_validate
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-merge-conflict
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: mixed-line-ending
+        args: [--fix=lf]
diff --git a/.releaserc.json b/.releaserc.json
new file mode 100644
index 0000000000..66b3eefd65
--- /dev/null
+++ b/.releaserc.json
@@ -0,0 +1,45 @@
+{
+  "branches": [
+    "main",
+    "master"
+  ],
+  "ci": false,
+  "plugins": [
+    [
+      "@semantic-release/commit-analyzer",
+      {
+        "preset": "conventionalcommits"
+      }
+    ],
+    [
+      "@semantic-release/release-notes-generator",
+      {
+        "preset": "conventionalcommits"
+      }
+    ],
+    [
+      "@semantic-release/github",
+      {
+        "successComment": "This ${issue.pull_request ? 'PR is included' : 'issue has been resolved'} in version ${nextRelease.version} :tada:",
+        "labels": false,
+        "releasedLabels": false
+      }
+    ],
+    [
+      "@semantic-release/changelog",
+      {
+        "changelogFile": "CHANGELOG.md",
+        "changelogTitle": "# Changelog\n\nAll notable changes to this project will be documented in this file."
+      }
+    ],
+    [
+      "@semantic-release/git",
+      {
+        "assets": [
+          "CHANGELOG.md"
+        ],
+        "message": "chore(release): version ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
+      }
+    ]
+  ]
+}
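Both the commit analyzer and the release-notes generator use the `conventionalcommits` preset, so the release type is derived from the conventional-commit messages (typically the PR titles that the pr-title workflow validates). The mapping below is a hedged illustration built from entries that appear later in the generated changelog; the version bumps are examples of the preset's default behavior, not rules encoded in this file:

```yaml
# Illustrative mapping from conventional-commit type to the semantic-release bump (preset defaults).
patch: "fix: Correct addon logic lookup to pull latest addon version"   # e.g. v21.0.4 -> v21.0.5
minor: "feat: Add AL2023 ARM64 NVIDIA variants"                          # e.g. v20.36.1 -> v20.37.0
major: "feat: Upgrade min AWS provider and Terraform versions"           # with a BREAKING CHANGES footer, e.g. v20.37.2 -> v21.0.0
```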
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 305584782a..1ba1615006 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,17 +1,1595 @@
-# Change Log
+# Changelog
 
 All notable changes to this project will be documented in this file.
 
-The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
-project adheres to [Semantic Versioning](http://semver.org/).
+## [21.0.6](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.5...v21.0.6) (2025-07-30)
 
-<a name="unreleased"></a>
-## [Unreleased]
 
+### Bug Fixes
+
+* Allow `instance_requirements` to be set in self-managed node groups ([#3455](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3455)) ([5322bf7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5322bf72fbbff4afb6a02ae283b21419d9de5b17))
+
+## [21.0.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.4...v21.0.5) (2025-07-29)
+
+
+### Bug Fixes
+
+* Correct addon logic lookup to pull latest addon version ([#3449](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3449)) ([55d7fa2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/55d7fa23a356f518ae7b73ec2ddb0ab5947f9a42))
+
+## [21.0.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.3...v21.0.4) (2025-07-25)
+
+
+### Bug Fixes
+
+* Correct encryption configuration enable logic; avoid creating Auto Mode policy when Auto Mode is not enabled ([#3439](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3439)) ([6b8a3d9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6b8a3d94777346d79a64ccd8287c96b525348013))
+
+## [21.0.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.2...v21.0.3) (2025-07-24)
+
+
+### Bug Fixes
+
+* Correct variable defaults for `ami_id` and `kubernetes_version` ([#3437](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3437)) ([8807e0b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8807e0bb55fdc49ed894b5b51c14131526dbfb91))
+
+## [21.0.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.1...v21.0.2) (2025-07-24)
+
+
+### Bug Fixes
+
+* Move `encryption_config` default for `resources` out of type definition and to default variable value to allow disabling encryption ([#3436](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3436)) ([b37368f](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b37368fdbc608a026f9c17952d964467f5e44e8a))
+
+## [21.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v21.0.0...v21.0.1) (2025-07-24)
+
+
+### Bug Fixes
+
+* Correct logic to try to use module created IAM role before falli… ([#3433](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3433)) ([97d4ebb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/97d4ebbe68a23aa431a534fd7ed56a76f9b37801))
+
+## [21.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.37.2...v21.0.0) (2025-07-23)
+
+
+### ⚠ BREAKING CHANGES
+
+* Upgrade min AWS provider and Terraform versions to `6.0` and `1.5.7` respectively (#3412)
+
+### Features
+
+* Upgrade min AWS provider and Terraform versions to `6.0` and `1.5.7` respectively ([#3412](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3412)) ([416515a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/416515a0da1ca96c539977d6460e2bc02f10b4d4))
+
+## [20.37.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.37.1...v20.37.2) (2025-07-17)
+
+
+### Bug Fixes
+
+* Allow for both `amazonaws.com.cn` and `amazonaws.com` conditions in PassRole as required for AWS CN ([#3422](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3422)) ([83b68fd](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/83b68fda2b0ea818fc980ab847dd8255a2d18334))
+
+## [20.37.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.37.0...v20.37.1) (2025-06-18)
+
+
+### Bug Fixes
+
+* Restrict AWS provider max version due to v6 provider breaking changes ([#3384](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3384)) ([681a868](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/681a868d624878474fd9f92d1b04d3fec0120db7))
+
+## [20.37.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.36.1...v20.37.0) (2025-06-09)
+
+
+### Features
+
+* Add AL2023 ARM64 NVIDIA variants ([#3369](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3369)) ([715d42b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/715d42bf146791cad911b0b6979c5ce67bc0d2f6))
+
+## [20.36.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.36.0...v20.36.1) (2025-06-09)
+
+
+### Bug Fixes
+
+* Ensure `additional_cluster_dns_ips` is passed through from root module ([#3376](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3376)) ([7a83b1b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7a83b1b3db9c7475fe6ec46d1c300c0a18f19b2a))
+
+## [20.36.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.35.0...v20.36.0) (2025-04-18)
+
+
+### Features
+
+* Add support for cluster `force_update_version` ([#3345](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3345)) ([207d73f](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/207d73fbaa5eebe6e98b94e95b83fd0a5a13c307))
+
+## [20.35.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.34.0...v20.35.0) (2025-03-29)
+
+
+### Features
+
+* Default to not changing autoscaling schedule values at the scheduled time ([#3322](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3322)) ([abf76f6](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/abf76f60144fe645bbf500d98505377fd4a9da79))
+
+## [20.34.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.33.1...v20.34.0) (2025-03-07)
+
+
+### Features
+
+* Add capacity reservation permissions to Karpenter IAM policy ([#3318](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3318)) ([770ee99](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/770ee99d9c4b61c509d9988eac62de4db113af91))
+
+## [20.33.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.33.0...v20.33.1) (2025-01-22)
+
+
+### Bug Fixes
+
+* Allow `"EC2"` access entry type for EKS Auto Mode custom node pools ([#3281](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3281)) ([3e2ea83](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3e2ea83267d7532cb66fa4de7f0d2a944b43c3d5))
+
+## [20.33.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.32.0...v20.33.0) (2025-01-17)
+
+
+### Features
+
+* Add node repair config to managed node group ([#3271](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3271)) ([edd7ef3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/edd7ef36dd0f6b6801275cbecbb6780f03fc7aed)), closes [terraform-aws-modules/terraform-aws-eks#3249](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3249)
+
+## [20.32.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.6...v20.32.0) (2025-01-17)
+
+
+### Features
+
+* Add Bottlerocket FIPS image variants ([#3275](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3275)) ([d876ac4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d876ac4ef1bb45e4f078d0928630033b659c9aa0))
+
+## [20.31.6](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.5...v20.31.6) (2024-12-20)
+
+
+### Bug Fixes
+
+* Revert changes to disabling auto mode [#3253](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3253) ([#3255](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3255)) ([1ac67b8](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1ac67b8a60e336285c4dca03e550dfc78d64acce))
+
+## [20.31.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.4...v20.31.5) (2024-12-20)
+
+
+### Bug Fixes
+
+* Correct Auto Mode disable ([#3253](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3253)) ([2a6a57a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/2a6a57a9bb1c6563608985bbdbfb7f47eec971df))
+
+## [20.31.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.3...v20.31.4) (2024-12-14)
+
+
+### Bug Fixes
+
+* Auto Mode custom tag policy should apply to cluster role, not node role ([#3242](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3242)) ([a07013a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a07013a1f4d4d56b56eb2e6265a6f38041a4540b))
+
+## [20.31.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.2...v20.31.3) (2024-12-12)
+
+
+### Bug Fixes
+
+* Update min provider version to remediate cluster replacement when enabling EKS Auto Mode ([#3240](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3240)) ([012e51c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/012e51c05551da48a7f380d4a7b75880b0c24fe1))
+
+## [20.31.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.1...v20.31.2) (2024-12-12)
+
+
+### Bug Fixes
+
+* Avoid trying to attach the node role when Auto Mode nodepools are not specified ([#3239](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3239)) ([ce34f1d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ce34f1db3f7824167d9a766e6c90dee3a6dcf1c3))
+
+## [20.31.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.31.0...v20.31.1) (2024-12-09)
+
+
+### Bug Fixes
+
+* Create EKS Auto Mode role when Auto Mode is enabled, regardless of built-in node pool use ([#3234](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3234)) ([e2846be](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e2846be8b110e59d36d6f868b74531a6d8ca4987))
+
+## [20.31.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.30.1...v20.31.0) (2024-12-04)
+
+
+### Features
+
+* Add support for EKS Auto Mode and EKS Hybrid nodes ([#3225](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3225)) ([3b974d3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3b974d33ad79e142566dd7bcb4bf10472cc91899))
+
+## [20.30.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.30.0...v20.30.1) (2024-11-26)
+
+
+### Bug Fixes
+
+* Coalesce local `resolve_conflicts_on_create_default` value to a boolean since default is `null` ([#3221](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3221)) ([35388bb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/35388bb8c4cfa0c351427c133490b914b9944b07))
+
+## [20.30.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.29.0...v20.30.0) (2024-11-26)
+
+
+### Features
+
+* Improve addon dependency chain and decrease time to provision addons (due to retries) ([#3218](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3218)) ([ab2207d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ab2207d50949079d5dd97c976c6f7a8f5b668f0c))
+
+## [20.29.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.28.0...v20.29.0) (2024-11-08)
+
+
+### Features
+
+* Add support for pod identity association on EKS addons ([#3203](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3203)) ([a224334](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a224334fc8000dc8728971dff8adad46ceb7a8a1))
+
+## [20.28.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.27.0...v20.28.0) (2024-11-02)
+
+
+### Features
+
+* Add support for creating `efa-only` network interfaces ([#3196](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3196)) ([c6da22c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c6da22c78f60a8643a6c76f97c93724f4e1f4e5a))
+
+## [20.27.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.26.1...v20.27.0) (2024-11-01)
+
+
+### Features
+
+* Add support for zonal shift ([#3195](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3195)) ([1b0ac83](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1b0ac832647dcf0425aedba119fa8276008cbe28))
+
+## [20.26.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.26.0...v20.26.1) (2024-10-27)
+
+
+### Bug Fixes
+
+* Use dynamic partition data source to determine DNS suffix for Karpenter EC2 pass role permission ([#3193](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3193)) ([dea6c44](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/dea6c44b459a546b1386563dfd497bc9d766bfe1))
+
+## [20.26.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.25.0...v20.26.0) (2024-10-12)
+
+
+### Features
+
+* Add support for `desired_capacity_type` (named `desired_size_type`) on self-managed node group ([#3166](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3166)) ([6974a5e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6974a5e1582a4ed2d8b1f9a07cdacd156ba5ffef))
+
+## [20.25.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.24.3...v20.25.0) (2024-10-12)
+
+
+### Features
+
+* Add support for newly released AL2023 accelerated AMI types ([#3177](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3177)) ([b2a8617](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b2a8617794a782107399b26c1ff4503e0ea5ec3a))
+
+
+### Bug Fixes
+
+* Update CI workflow versions to latest ([#3176](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3176)) ([eb78240](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/eb78240617993845a2a85056655b16302ea9a02c))
+
+## [20.24.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.24.2...v20.24.3) (2024-10-03)
+
+
+### Bug Fixes
+
+* Add `primary_ipv6` parameter to self-managed-node-group ([#3169](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3169)) ([fef6555](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/fef655585b33d717c1665bf8151f0573a17dedc2))
+
+## [20.24.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.24.1...v20.24.2) (2024-09-21)
+
+
+### Bug Fixes
+
+* Remove deprecated `inline_policy` from cluster role ([#3163](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3163)) ([8b90872](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8b90872983b9c349ff2e0a71678d687dc32ed626))
+
+## [20.24.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.24.0...v20.24.1) (2024-09-16)
+
+
+### Bug Fixes
+
+* Correct Karpenter EC2 service principal DNS suffix in non-commercial regions ([#3157](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3157)) ([47ab3eb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/47ab3eb884ab243a99322998445127ea6802fcaf))
+
+## [20.24.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.23.0...v20.24.0) (2024-08-19)
+
+
+### Features
+
+* Add support for Karpenter v1 controller IAM role permissions ([#3126](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3126)) ([e317651](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e31765153570631c1978e11cfd1d28e5fc349d8f))
+
+## [20.23.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.22.1...v20.23.0) (2024-08-09)
+
+
+### Features
+
+* Add new output values for OIDC issuer URL and provider that are dual-stack compatible ([#3120](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3120)) ([72668ac](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/72668ac04a2879fd3294e6059238b4aed57278fa))
+
+## [20.22.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.22.0...v20.22.1) (2024-08-09)
+
+
+### Bug Fixes
+
+* Eliminates null check on tag values to fix for_each error about unknown *keys* ([#3119](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3119)) ([6124a08](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6124a08578d6c6bca1851df4c82cb7e2126e460a)), closes [#3118](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3118) [#2760](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2760) [#2681](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2681) [#2337](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2337)
+
+## [20.22.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.21.0...v20.22.0) (2024-08-05)
+
+
+### Features
+
+* Enable update in place for node groups with cluster placement group strategy ([#3045](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3045)) ([75db486](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/75db486530459a04ce6eb2e4ed44b29d062de1b3))
+
+## [20.21.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.20.0...v20.21.0) (2024-08-05)
+
+
+### Features
+
+* Add support for `upgrade_policy` ([#3112](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3112)) ([e12ab7a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e12ab7a5de4ac82968aaede419752ce2bbb6a93d))
+
+## [20.20.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.19.0...v20.20.0) (2024-07-19)
+
+
+### Features
+
+* Enable support for ignore_failed_scaling_activities ([#3104](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3104)) ([532226e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/532226e64e61328b25426cabc27e4009e085154f))
+
+## [20.19.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.18.0...v20.19.0) (2024-07-15)
+
+
+### Features
+
+* Pass the `primary_ipv6` argument to the AWS provider. ([#3098](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3098)) ([e1bb8b6](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e1bb8b66617299c6d9972139b1f9355322e7801e))
+
+## [20.18.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.17.2...v20.18.0) (2024-07-15)
+
+
+### Features
+
+* Support `bootstrap_self_managed_addons` ([#3099](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3099)) ([af88e7d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/af88e7d2f835b3dfde242157ba3dd98b749bbc0b))
+
+## [20.17.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.17.1...v20.17.2) (2024-07-05)
+
+
+### Bug Fixes
+
+* Revert [#3058](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3058) - fix: Invoke aws_iam_session_context data source only when required ([#3092](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3092)) ([93ffdfc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/93ffdfc6fa380cb0b73df7380e7e62302ebb1a98))
+
+## [20.17.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.17.0...v20.17.1) (2024-07-05)
+
+
+### Bug Fixes
+
+* Invoke `aws_iam_session_context` data source only when required ([#3058](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3058)) ([f02df92](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f02df92b66a9776a689a2baf39e7474f3b703d89))
+
+## [20.17.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.16.0...v20.17.0) (2024-07-05)
+
+
+### Features
+
+* Add support for ML capacity block reservations with EKS managed node group(s) ([#3091](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3091)) ([ae3379e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ae3379e92429ed842f1c1017fd6ee59ec9f297d4))
+
+## [20.16.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.15.0...v20.16.0) (2024-07-02)
+
+
+### Features
+
+* Add support for custom IAM role policy ([#3087](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3087)) ([1604c6c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1604c6cdc8cedcd47b7357c5068dc11d0ed1d7e5))
+
+## [20.15.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.14.0...v20.15.0) (2024-06-27)
+
+
+### Features
+
+* Deny HTTP on Karpenter SQS policy ([#3080](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3080)) ([f6e071c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f6e071cd99faa56b988b63051b22df260e929b03))
+
+## [20.14.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.13.1...v20.14.0) (2024-06-13)
+
+
+### Features
+
+* Require users to supply OS via `ami_type` and not via `platform` which is unable to distinguish between the number of variants supported today ([#3068](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3068)) ([ef657bf](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ef657bfcb51296841f14cf514ffefb1066f810ee))
+
+## [20.13.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.13.0...v20.13.1) (2024-06-04)
+
+
+### Bug Fixes
+
+* Correct syntax for correctly ignoring `bootstrap_cluster_creator_admin_permissions` and not all of `access_config` ([#3056](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3056)) ([1e31929](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1e319290445a6eb50b53dfb89c9ae9f2949d38d7))
+
+## [20.13.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.12.0...v20.13.0) (2024-05-31)
+
+
+### Features
+
+* Starting with `1.30`, do not use the cluster OIDC issuer URL by default in the identity provider config ([#3055](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3055)) ([00f076a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/00f076ada4cd78c5c34b8be6e8eba44b628b629a))
+
+## [20.12.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.11.1...v20.12.0) (2024-05-28)
+
+
+### Features
+
+* Support additional cluster DNS IPs with Bottlerocket based AMIs ([#3051](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3051)) ([541dbb2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/541dbb29f12bb763a34b32acdaea9cea12d7f543))
+
+## [20.11.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.11.0...v20.11.1) (2024-05-21)
+
+
+### Bug Fixes
+
+* Ignore changes to `bootstrap_cluster_creator_admin_permissions` which is disabled by default ([#3042](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3042)) ([c65d308](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c65d3085037d9c1c87f4fd3a5be1ca1d732dbf7a))
+
+## [20.11.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.10.0...v20.11.0) (2024-05-16)
+
+
+### Features
+
+* Add `SourceArn` condition to Fargate profile trust policy ([#3039](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3039)) ([a070d7b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a070d7b2bd92866b91e0963a0f819eec9839ed03))
+
+## [20.10.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.9.0...v20.10.0) (2024-05-09)
+
+
+### Features
+
+* Add support for Pod Identity association on Karpenter sub-module ([#3031](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3031)) ([cfcaf27](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/cfcaf27ac78278916ebf3d51dc64a20fe0d7bf01))
+
+## [20.9.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.5...v20.9.0) (2024-05-08)
+
+
+### Features
+
+* Propagate `ami_type` to self-managed node group; allow using `ami_type` only ([#3030](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3030)) ([74d3918](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/74d39187d855932dd976da6180eda42dcfe09873))
+
+## [20.8.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.4...v20.8.5) (2024-04-08)
+
+
+### Bug Fixes
+
+* Force cluster outputs to wait until access entries are complete ([#3000](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3000)) ([e2a39c0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e2a39c0f261d776e4e18a650aa9068429c4f5ef4))
+
+## [20.8.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.3...v20.8.4) (2024-03-21)
+
+
+### Bug Fixes
+
+* Pass nodeadm user data variables from root module down to nodegroup sub-modules ([#2981](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2981)) ([84effa0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/84effa0e30f64ba2fceb7f89c2a822e92f1ee1ea))
+
+## [20.8.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.2...v20.8.3) (2024-03-12)
+
+
+### Bug Fixes
+
+* Ensure the correct service CIDR and IP family is used in the rendered user data ([#2963](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2963)) ([aeb9f0c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/aeb9f0c990b259320a6c3e5ff93be3f064bb9238))
+
+## [20.8.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.1...v20.8.2) (2024-03-11)
+
+
+### Bug Fixes
+
+* Ensure a default `ip_family` value is provided to guarantee a CNI policy is attached to nodes ([#2967](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2967)) ([29dcca3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/29dcca335d80e248c57b8efa2c36aaef2e1b1bd2))
+
+## [20.8.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.0...v20.8.1) (2024-03-10)
+
+
+### Bug Fixes
+
+* Do not attach policy if Karpenter node role is not created by module ([#2964](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2964)) ([3ad19d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3ad19d7435f34600e4872fd131e155583e498cd9))
+
+## [20.8.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.7.0...v20.8.0) (2024-03-10)
+
+
+### Features
+
+* Replace the use of `toset()` with static keys for node IAM role policy attachment ([#2962](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2962)) ([57f5130](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/57f5130132ca11fd3e478a61a8fc082a929540c2))
+
+## [20.7.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.6.0...v20.7.0) (2024-03-09)
+
+
+### Features
+
+* Add support for creating a placement group for managed node groups ([#2959](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2959)) ([3031631](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/30316312f33fe7fd09faf86fdb1b01ab2a377b2a))
+
+## [20.6.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.3...v20.6.0) (2024-03-09)
+
+
+### Features
+
+* Add support for tracking latest AMI release version on managed nodegroups ([#2951](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2951)) ([393da7e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/393da7ec0ed158cf783356ab10959d91430c1d80))
+
+## [20.5.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.2...v20.5.3) (2024-03-08)
+
+
+### Bug Fixes
+
+* Update AWS provider version to support `AL2023_*` AMI types; ensure AL2023 user data receives cluster service CIDR ([#2960](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2960)) ([dfe4114](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/dfe41141c2385db783d97494792c8f2e227cfc7c))
+
+## [20.5.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.1...v20.5.2) (2024-03-07)
+
+
+### Bug Fixes
+
+* Use the `launch_template_tags` on the launch template ([#2957](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2957)) ([0ed32d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0ed32d7b291513f34775ca85b0aa33da085d09fa))
+
+## [20.5.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.0...v20.5.1) (2024-03-07)
+
+
+### Bug Fixes
+
+* Update CI workflow versions to remove deprecated runtime warnings ([#2956](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2956)) ([d14cc92](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d14cc925c450451b023407d05a2516d7682d1617))
+
+## [20.5.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.4.0...v20.5.0) (2024-03-01)
+
+
+### Features
+
+* Add support for AL2023 `nodeadm` user data ([#2942](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2942)) ([7c99bb1](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7c99bb19cdbf1eb4f4543f9b8e6d29c3a6734a55))
+
+## [20.4.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.3.0...v20.4.0) (2024-02-23)
+
+
+### Features
+
+* Add support for enabling EFA resources ([#2936](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2936)) ([7f472ec](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7f472ec660049d4ca85de039cb3015c1b1d12fb8))
+
+## [20.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.2...v20.3.0) (2024-02-21)
+
+
+### Features
+
+* Add support for addon and identity provider custom tags ([#2938](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2938)) ([f6255c4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f6255c49e47d44bd62bb2b4e1e448ac80ceb2b3a))
+
+### [20.2.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.1...v20.2.2) (2024-02-21)
+
+
+### Bug Fixes
+
+* Replace Karpenter SQS policy dynamic service principal DNS suffixes with static `amazonaws.com` ([#2941](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2941)) ([081c762](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/081c7624a5a4f2b039370ae8eb9ee8e445d01c48))
+
+### [20.2.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.0...v20.2.1) (2024-02-08)
+
+
+### Bug Fixes
+
+* Karpenter `enable_spot_termination = false` should not result in an error ([#2907](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2907)) ([671fc6e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/671fc6e627d957ada47ef3f33068d715e79d25d6))
+
+## [20.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.1.1...v20.2.0) (2024-02-06)
+
+
+### Features
+
+* Allow enable/disable of EKS pod identity for the Karpenter controller ([#2902](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2902)) ([cc6919d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/cc6919de811f3972815d4ca26e5e0c8f64c2b894))
+
+### [20.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.1.0...v20.1.1) (2024-02-06)
+
+
+### Bug Fixes
+
+* Update access entries `kubernetes_groups` default value to `null` ([#2897](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2897)) ([1e32e6a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1e32e6a9f8a389b1a4969dde697d34ba4e3c85ac))
+
+## [20.1.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.0.1...v20.1.0) (2024-02-06)
+
+
+### Features
+
+* Add output for `access_policy_associations` ([#2904](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2904)) ([0d2a4c2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0d2a4c2af3d7c8593226bbccbf8753950e741b15))
+
+### [20.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.0.0...v20.0.1) (2024-02-03)
+
+
+### Bug Fixes
+
+* Correct cluster access entry to create multiple policy associations per access entry ([#2892](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2892)) ([4177913](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/417791374cf72dfb673105359463398eb4a75d6e))
+
+## [20.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.21.0...v20.0.0) (2024-02-02)
+
+
+### ⚠ BREAKING CHANGES
+
+* Replace the use of `aws-auth` configmap with EKS cluster access entry (#2858)
+
+### Features
+
+* Replace the use of `aws-auth` configmap with EKS cluster access entry ([#2858](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2858)) ([6b40bdb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6b40bdbb1d283d9259f43b03d24dca99cc1eceff))
+
+## [19.21.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.20.0...v19.21.0) (2023-12-11)
+
+
+### Features
+
+* Add tags for CloudWatch log group only ([#2841](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2841)) ([4c5c97b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/4c5c97b5d404a4e46945e3b6228d469743669937))
+
+## [19.20.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.19.1...v19.20.0) (2023-11-14)
+
+
+### Features
+
+* Allow OIDC root CA thumbprint to be included/excluded ([#2778](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2778)) ([091c680](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/091c68051d9cbf24644121a24c715307f00c44b3))
+
+### [19.19.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.19.0...v19.19.1) (2023-11-10)
+
+
+### Bug Fixes
+
+* Remove additional conditional on Karpenter instance profile creation to support upgrading ([#2812](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2812)) ([c36c8dc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c36c8dc825aa09e2ded20ff675905aa8857853cf))
+
+## [19.19.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.18.0...v19.19.0) (2023-11-04)
+
+
+### Features
+
+* Update KMS module to avoid calling data sources when `create_kms_key = false` ([#2804](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2804)) ([0732bea](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0732bea85f46fd2629705f9ee5f87cb695ee95e5))
+
+## [19.18.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.4...v19.18.0) (2023-11-01)
+
+
+### Features
+
+* Add Karpenter v1beta1 compatibility ([#2800](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2800)) ([aec2bab](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/aec2bab1d8da89b65b84d11fef77cbc969fccc91))
+
+### [19.17.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.3...v19.17.4) (2023-10-30)
+
+
+### Bug Fixes
+
+* Update `license_specification` result type ([#2798](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2798)) ([ba0ebeb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ba0ebeb11a64a6400a3666165509975d5cdfea43))
+
+### [19.17.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.2...v19.17.3) (2023-10-30)
+
+
+### Bug Fixes
+
+* Correct key used on `license_configuration_arn` ([#2796](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2796)) ([bd4bda2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/bd4bda266e23635c7ca09b6e9d307b29ef6b8579))
+
+### [19.17.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.1...v19.17.2) (2023-10-10)
+
+
+### Bug Fixes
+
+* Karpenter node IAM role policies variable should be a map of strings, not list ([#2771](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2771)) ([f4766e5](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f4766e5c27f060e8c7f5950cf82d1fe59c3231af))
+
+### [19.17.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.0...v19.17.1) (2023-10-06)
+
+
+### Bug Fixes
+
+* Only include CA thumbprint in OIDC provider list ([#2769](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2769)) ([7e5de15](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7e5de1566c7e1330c05c5e6c51f5ab4690001915)), closes [#2732](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2732) [#32847](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/32847)
+
+## [19.17.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.16.0...v19.17.0) (2023-10-06)
+
+
+### Features
+
+* Add support for `allowed_instance_types` on self-managed nodegroup ASG ([#2757](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2757)) ([feee18d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/feee18dd423b1e76f8a5119206f23306e5879b26))
+
+## [19.16.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.4...v19.16.0) (2023-08-03)
+
+
+### Features
+
+* Add `node_iam_role_arns` local variable to check for Windows platform on EKS managed nodegroups ([#2477](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2477)) ([adb47f4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/adb47f46dc53b1a0c18691a59dc58401c327c0be))
+
+### [19.15.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.3...v19.15.4) (2023-07-27)
+
+
+### Bug Fixes
+
+* Use `coalesce` when desired default value is not `null` ([#2696](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2696)) ([c86f8d4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c86f8d4db3236e7dae59ef9142da4d7e496138c8))
+
+### [19.15.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.2...v19.15.3) (2023-06-09)
+
+
+### Bug Fixes
+
+* Snapshot permissions issue for Karpenter submodule ([#2649](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2649)) ([6217d0e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6217d0eaab4c864ec4d40a31538e78a7fbcee5e3))
+
+### [19.15.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.1...v19.15.2) (2023-05-30)
+
+
+### Bug Fixes
+
+* Ensure `irsa_tag_values` can be tried before defaulting to `cluster_name` in the Karpenter module ([#2631](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2631)) ([6c56e2a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6c56e2ad20057a5672526b5484df96806598a4e2))
+
+### [19.15.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.0...v19.15.1) (2023-05-24)
+
+
+### Bug Fixes
+
+* Revert changes to ignore `role_last_used` ([#2629](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2629)) ([e23139a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e23139ad2da0c31c8aa644ae0516ba9ee2a66399))
+
+## [19.15.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.14.0...v19.15.0) (2023-05-24)
+
+
+### Features
+
+* Ignore changes to `*.aws_iam_role.*.role_last_used` ([#2628](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2628)) ([f8ea3d0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f8ea3d08adbc4abfb18a77ad44e30b93cd05c050))
+
+## [19.14.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.13.1...v19.14.0) (2023-05-17)
+
+
+### Features
+
+* Add `irsa_tag_values` variable ([#2584](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2584)) ([aa3bdf1](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/aa3bdf1c19747bca7067c6e49c071ae80a9ca5e5))
+
+### [19.13.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.13.0...v19.13.1) (2023-04-18)
+
+
+### Bug Fixes
+
+* SQS queue encryption types selection ([#2575](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2575)) ([969c7a7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/969c7a7c4340c8ed327d18f86c5e00e18190a48b))
+
+## [19.13.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.12.0...v19.13.0) (2023-04-12)
+
+
+### Features
+
+* Add support for `allowed_instance_type` ([#2552](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2552)) ([54417d2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/54417d244c06b459b399e84433343af6e9934bb3))
+
+## [19.12.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.11.0...v19.12.0) (2023-03-31)
+
+
+### Features
+
+* Add Autoscaling schedule for EKS managed node group ([#2504](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2504)) ([4a2523c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/4a2523cddd4498f3ece5aee2eedf618dd701eb59))
+
+## [19.11.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.10.3...v19.11.0) (2023-03-28)
+
+
+### Features
+
+* Add optional list of policy ARNs for attachment to Karpenter IRSA ([#2537](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2537)) ([bd387d6](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/bd387d69fac5a431a426e12de786ab80aea112a6))
+
+### [19.10.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.10.2...v19.10.3) (2023-03-23)
+
+
+### Bug Fixes
+
+* Add `aws_eks_addons.before_compute` to the `cluster_addons` output ([#2533](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2533)) ([f977d83](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f977d83500ac529b09918d4e78aa8887749a8cd1))
+
+### [19.10.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.10.1...v19.10.2) (2023-03-23)
+
+
+### Bug Fixes
+
+* Add `Name` tag for EKS CloudWatch log group ([#2500](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2500)) ([e64a490](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e64a490d8db4ebf495f42c542a40d7d763005873))
+
+### [19.10.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.10.0...v19.10.1) (2023-03-17)
+
+
+### Bug Fixes
+
+* Return correct status for managed node groups ([#2524](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2524)) ([e257daf](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e257dafe94e11384caf210d9ff21c4d3e078cb17))
+
+## [19.10.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.9.0...v19.10.0) (2023-02-17)
+
+
+### Features
+
+* Allow setting a custom IRSA policy name for Karpenter ([#2480](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2480)) ([8954ff7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8954ff7bb433358ba99b77248e3aae377d3a580b))
+
+## [19.9.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.8.0...v19.9.0) (2023-02-17)
+
+
+### Features
+
+* Add support for enabling addons before data plane compute is created ([#2478](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2478)) ([78027f3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/78027f37e43c79748cd7528d3803122cb8072ed7))
+
+## [19.8.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.7.0...v19.8.0) (2023-02-15)
+
+
+### Features
+
+* Add auto discovery permission of cluster endpoint to Karpenter role ([#2451](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2451)) ([c4a4b8a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c4a4b8afe3d1e89117573e9e04aea08871a069dc))
+
+## [19.7.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.6.0...v19.7.0) (2023-02-07)
+
+
+### Features
+
+* Allow passing a prefix for rule names ([#2437](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2437)) ([68fe60f](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/68fe60f1c4e975d7f6f2c22ae891a32fd80a0156))
+
+## [19.6.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.5.1...v19.6.0) (2023-01-28)
+
+
+### Features
+
+* Add `prometheus-adapter` port 6443 to recommended security groups ([#2399](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2399)) ([059dc0c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/059dc0c67c2aebbf2c9a2f0a05856a823dd1b5a0))
+
+### [19.5.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.5.0...v19.5.1) (2023-01-05)
+
+
+### Bug Fixes
+
+* AMI lookup should only happen when launch template is created ([#2386](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2386)) ([3834935](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/383493538748f1df844d40068cdde62579b79476))
+
+## [19.5.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.4.3...v19.5.0) (2023-01-05)
+
+
+### Features
+
+* Ignore changes to labels and annotations on the `aws-auth` ConfigMap ([#2380](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2380)) ([5015b42](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5015b429e656d927fb66f214c998713c6fc84755))
+
+### [19.4.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.4.2...v19.4.3) (2023-01-05)
+
+
+### Bug Fixes
+
+* Pin to a specific version to avoid GitHub API rate limiting on CI workflows ([#2376](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2376)) ([460e43d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/460e43db77244ad3ca2e62514de712fb0cc2cd7a))
+
+### [19.4.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.4.1...v19.4.2) (2022-12-20)
+
+
+### Bug Fixes
+
+* Drop `spot-instances-request` from `tag_specifications` ([#2363](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2363)) ([e391a99](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e391a99a7bd8209618fdb65cc09460673fbaf1bc))
+
+### [19.4.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.4.0...v19.4.1) (2022-12-20)
+
+
+### Bug Fixes
+
+* Correct `eks_managed_*` to `self_managed_*` for `tag_specification` argument ([#2364](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2364)) ([df7c57c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/df7c57c199d9e9f54d9ed18fb7c1e3a47ad732ed))
+
+## [19.4.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.3.1...v19.4.0) (2022-12-19)
+
+
+### Features
+
+* Allow configuring which tags are passed on launch template tag specifications ([#2360](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2360)) ([094ed1d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/094ed1d5e461552a0a76bc019c36690fe0fc2dd5))
+
+### [19.3.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.3.0...v19.3.1) (2022-12-18)
+
+
+### Bug Fixes
+
+* Correct map name for security group rule 4443/tcp ([#2354](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2354)) ([13a9542](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/13a9542dadd29fa75fd76c2adcee9dd17dcffda4))
+
+## [19.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.2.0...v19.3.0) (2022-12-18)
+
+
+### Features
+
+* Add additional port for `metrics-server` to recommended rules ([#2353](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2353)) ([5a270b7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5a270b7bf8de8c5846e91d72ffd9f594cbd8b921))
+
+## [19.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.1.1...v19.2.0) (2022-12-18)
+
+
+### Features
+
+* Ensure all supported resources are tagged under `tag_specifications` on launch templates ([#2352](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2352)) ([0751a0c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0751a0ca04d6303015e8a9c2f917956ea00d184b))
+
+### [19.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.1.0...v19.1.1) (2022-12-17)
+
+
+### Bug Fixes
+
+* Use the IAM session context data source to resolve the identity's role when using `assumed_role` ([#2347](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2347)) ([71b8eca](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/71b8ecaa87db89c454b2c9446ff3d7675e4dc5a7))
+
+## [19.1.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.0.4...v19.1.0) (2022-12-16)
+
+
+### Features
+
+* Add support for addon `configuration_values` ([#2345](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2345)) ([3b62f6c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3b62f6c31604490fc19184e626e73873b296ecd1))
+
+### [19.0.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.0.3...v19.0.4) (2022-12-07)
+
+
+### Bug Fixes
+
+* Ensure that custom KMS key is not created if encryption is not enabled, support computed values in cluster name ([#2328](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2328)) ([b83f6d9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b83f6d98bfbca548012ea74e792fe14f04f0e6dc))
+
+### [19.0.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.0.2...v19.0.3) (2022-12-07)
+
+
+### Bug Fixes
+
+* Fix `Invalid value for "replace" parameter: argument must not be null` error ([#2322](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2322)) ([9adc475](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/9adc475bc1f1a201648e37b26cefe9bdf6b3a2f7))
+
+### [19.0.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.0.1...v19.0.2) (2022-12-06)
+
+
+### Bug Fixes
+
+* `public_access_cidrs` requires a value even if the public endpoint is disabled ([#2320](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2320)) ([3f6d915](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3f6d915eef6672440df8c82468c31ed2bc2fce54))
+
+### [19.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.0.0...v19.0.1) (2022-12-06)
+
+
+### Bug Fixes
+
+* Call to `lookup()` was closed too early, breaking security group rule creation in the cluster security group when a custom source security group is defined ([#2319](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2319)) ([7bc4a27](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7bc4a2743f0cdf9c8556a2c067eeb82436aafb41))
+
+## [19.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.31.2...v19.0.0) (2022-12-05)
+
+
+### ⚠ BREAKING CHANGES
+
+* Add support for Outposts, remove node security group, add support for addon `preserve` and `most_recent` configurations (#2250)
+
+### Features
+
+* Add support for Outposts, remove node security group, add support for addon `preserve` and `most_recent` configurations ([#2250](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2250)) ([b2e97ca](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b2e97ca3dcbcd76063f1c932aa5199b4f49a2aa1))
+
+### [18.31.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.31.1...v18.31.2) (2022-11-23)
+
+
+### Bug Fixes
+
+* Ensure that `var.create` is tied to all resources correctly ([#2308](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2308)) ([3fb28b3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3fb28b357f4fc9144340f94abe9dd520e89f49e2))
+
+### [18.31.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.31.0...v18.31.1) (2022-11-22)
+
+
+### Bug Fixes
+
+* Include all certificate fingerprints in the OIDC provider thumbprint list ([#2307](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2307)) ([7436178](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7436178cc1a720a066c73f1de23b04b3c24ae608))
+
+## [18.31.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.30.3...v18.31.0) (2022-11-21)
+
+
+### Features
+
+* New Karpenter sub-module for easily enabling Karpenter on EKS ([#2303](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2303)) ([f24de33](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f24de3326d3c12ce61fbaefe1e3dbe7418d8bc85))
+
+### [18.30.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.30.2...v18.30.3) (2022-11-07)
+
+
+### Bug Fixes
+
+* Update CI configuration files to use latest version ([#2293](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2293)) ([364c60d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/364c60d572e85676adca8f6e62679de7d9551271))
+
+### [18.30.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.30.1...v18.30.2) (2022-10-14)
+
+
+### Bug Fixes
+
+* Disable creation of cluster security group rules that map to node security group when `create_node_security_group` = `false` ([#2274](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2274)) ([28ccece](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/28ccecefe22d81a3a7febbbc3efc17c6590f88e1))
+
+### [18.30.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.30.0...v18.30.1) (2022-10-11)
+
+
+### Bug Fixes
+
+* Update CloudWatch log group creation deny policy to use wildcard ([#2267](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2267)) ([ac4d549](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ac4d549629aa64bbd92f80486bef904a9098e0fa))
+
+## [18.30.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.29.1...v18.30.0) (2022-09-29)
+
+
+### Features
+
+* Add output for cluster TLS certificate SHA1 fingerprint and provider tags to cluster primary security group ([#2249](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2249)) ([a74e980](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a74e98017b5dc7ed396cf26bfaf98ff7951c9e2e))
+
+### [18.29.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.29.0...v18.29.1) (2022-09-26)
+
+
+### Bug Fixes
+
+* Set `image_id` to come from the launch template instead of data source for self-managed node groups ([#2239](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2239)) ([c5944e5](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c5944e5fb6ea07429ef79f5fe5592e7111567e1e))
+
+## [18.29.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.28.0...v18.29.0) (2022-08-26)
+
+
+### Features
+
+* Allow TLS provider to use versions 3.0+ (i.e. - `>= 3.0`) ([#2211](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2211)) ([f576a6f](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f576a6f9ea523c94a7bb5420d5ab3ed8c7d3fec7))
+
+## [18.28.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.27.1...v18.28.0) (2022-08-17)
+
+
+### Features
+
+* Add output for launch template name, and correct variable type value ([#2205](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2205)) ([0a52d69](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0a52d690d54a7c39fd4e0d46db36d200f7ef679e))
+
+### [18.27.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.27.0...v18.27.1) (2022-08-09)
+
+
+### Bug Fixes
+
+* Remove empty `""` from node group names output when node group creation is disabled ([#2197](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2197)) ([d2f162b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d2f162b190596756f1bc9d8f8061e68329c3e5c4))
+
+## [18.27.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.6...v18.27.0) (2022-08-09)
+
+
+### Features
+
+* Default to the cluster's OIDC issuer URL for `aws_eks_identity_provider_config` ([#2190](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2190)) ([93065fa](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/93065fabdf508267b399f677d561f18fd6d7b7f0))
+
+### [18.26.6](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.5...v18.26.6) (2022-07-22)
+
+
+### Bug Fixes
+
+* Pin TLS provider version to 3.x versions only ([#2174](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2174)) ([d990ea8](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d990ea8aff682315828d7c177a309c71541e023c))
+
+### [18.26.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.4...v18.26.5) (2022-07-20)
+
+
+### Bug Fixes
+
+* Bump KMS module to 1.0.2 to fix malformed policy document when not specifying `key_owners` ([#2163](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2163)) ([0fd1ab1](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0fd1ab1db9b752e58211428e3c19f62655e5f97d))
+
+### [18.26.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.3...v18.26.4) (2022-07-20)
+
+
+### Bug Fixes
+
+* Use partition data source on VPC CNI IPv6 policy ([#2161](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2161)) ([f2d67ff](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f2d67ffa97cc0f9827f75673b1cd263e3a5062b6))
+
+### [18.26.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.2...v18.26.3) (2022-07-05)
+
+
+### Bug Fixes
+
+* Correct Fargate profiles additional IAM role policies default type to match variable ([#2143](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2143)) ([c4e6d28](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c4e6d28fc064435f6f05c6c57d7fff8576d9fbba))
+
+### [18.26.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.1...v18.26.2) (2022-07-01)
+
+
+### Bug Fixes
+
+* Correct variable types to improve dynamic check correctness ([#2133](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2133)) ([2d7701c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/2d7701c3b0f2c6dcc10f31fc1f703bfde31b2c5b))
+
+### [18.26.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.26.0...v18.26.1) (2022-06-29)
+
+
+### Bug Fixes
+
+* Update KMS module version which aligns on module version requirements ([#2127](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2127)) ([bc04cd3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/bc04cd3a0a4286566ea56b20d9314115c6e489ab))
+
+## [18.26.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.25.0...v18.26.0) (2022-06-28)
+
+
+### Features
+
+* Add support for specifying NTP address to use private Amazon Time Sync Service ([#2125](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2125)) ([4543ab4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/4543ab454bea80b64381b88a631d955a7cfae247))
+
+## [18.25.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.24.1...v18.25.0) (2022-06-28)
+
+
+### Features
+
+* Add support for creating KMS key for cluster secret encryption ([#2121](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2121)) ([75acb09](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/75acb09ec56c5ce8e5f74ebc7bf15468b272db8a))
+
+### [18.24.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.24.0...v18.24.1) (2022-06-19)
+
+
+### Bug Fixes
+
+* Remove `modified_at` from ignored changes on EKS addons ([#2114](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2114)) ([5a5a32e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5a5a32ed1241ba3cc64abe37b37bcb5ad52d42c4))
+
+## [18.24.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.23.0...v18.24.0) (2022-06-18)
+
+
+### Features
+
+* Add support for specifying control plane subnets separate from those used by node groups (data plane) ([#2113](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2113)) ([ebc91bc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ebc91bcd37a919a350d872a5b235ccc2a79955a6))
+
+## [18.23.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.22.0...v18.23.0) (2022-06-02)
+
+
+### Features
+
+* Add `autoscaling_group_tags` variable to self-managed node groups ([#2084](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2084)) ([8584dcb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8584dcb2e0c9061828505c36a8ed8eb6ced02053))
+
+## [18.22.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.21.0...v18.22.0) (2022-06-02)
+
+
+### Features
+
+* Apply `distinct()` on role ARNs to ensure no duplicated roles in the `aws-auth` ConfigMap ([#2097](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2097)) ([3feb369](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3feb36927f92fb72ab0cfc25a3ab67465872f4bf))
+
+## [18.21.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.5...v18.21.0) (2022-05-12)
+
+
+### Features
+
+* Add `create_autoscaling_group` option and extra outputs ([#2067](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2067)) ([58420b9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/58420b92a0838aa2e17b156b174893b349083a2b))
+
+### [18.20.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.4...v18.20.5) (2022-04-21)
+
+
+### Bug Fixes
+
+* Add conditional variable to allow users to opt out of tagging cluster primary security group ([#2034](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2034)) ([51e4182](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/51e418216f210647b69bbd06e569a061c2f0e3c1))
+
+### [18.20.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.3...v18.20.4) (2022-04-20)
+
+
+### Bug Fixes
+
+* Correct DNS suffix for OIDC provider ([#2026](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2026)) ([5da692d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5da692df67cae313711e94216949d1105da6a87f))
+
+### [18.20.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.2...v18.20.3) (2022-04-20)
+
+
+### Bug Fixes
+
+* Add `compact()` to `aws_auth_configmap_yaml` for when node groups are set to `create = false` ([#2029](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2029)) ([c173ba2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c173ba2d62d228729fe6c68f713af6dbe15e7233))
+
+### [18.20.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.1...v18.20.2) (2022-04-12)
+
+
+### Bug Fixes
+
+* Avoid re-naming the primary security group through a `Name` tag and leave to the EKS service to manage ([#2010](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2010)) ([b5ae5da](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b5ae5daa39f8380dc21c9ef1daff22242930692e))
+
+### [18.20.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.20.0...v18.20.1) (2022-04-09)
+
+
+### Bug Fixes
+
+* Correct `iam_role_user_name_prefix` type to a bool ([#2000](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2000)) ([c576aad](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c576aadce968d09f3295fc06f0766cc9e2a35e29))
+
+## [18.20.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.19.0...v18.20.0) (2022-04-09)
+
+
+### Features
+
+* Add support for managing `aws-auth` configmap using new `kubernetes_config_map_v1_data` resource ([#1999](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1999)) ([da3d54c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/da3d54cde70adfd8b5d2770805b17d526923113e))
+
+## [18.19.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.18.0...v18.19.0) (2022-04-04)
+
+
+### Features
+
+* Add `create_before_destroy` lifecycle hook to security groups created ([#1985](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1985)) ([6db89f8](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6db89f8f20a58ae5cfbab5541ff7e499ddf971b8))
+
+## [18.18.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.17.1...v18.18.0) (2022-04-03)
+
+
+### Features
+
+* Add support for allowing EFA network interfaces ([#1980](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1980)) ([523144e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/523144e1d7d4f64ccf30656078fd10d7cd63a444))
+
+### [18.17.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.17.0...v18.17.1) (2022-04-02)
+
+
+### Bug Fixes
+
+* Correct `capacity_reservation_target` within launch templates of both EKS and self managed node groups ([#1979](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1979)) ([381144e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/381144e3bb604b3086ceea537a6052a6179ce5b3))
+
+## [18.17.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.16.0...v18.17.0) (2022-03-30)
+
+
+### Features
+
+* Add back in CloudWatch log group create deny policy to cluster IAM role ([#1974](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1974)) ([98e137f](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/98e137fad990d51a31d86e908ea593e933fc22a9))
+
+## [18.16.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.15.0...v18.16.0) (2022-03-29)
+
+
+### Features
+
+* Support `default_tags` in `aws_autoscaling_group` ([#1973](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1973)) ([7a9458a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7a9458af52ddf1f6180324e845b1e8a26fd5c1f5))
+
+## [18.15.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.14.1...v18.15.0) (2022-03-25)
+
+
+### Features
+
+* Update TLS provider and remove unnecessary cloud init version requirements ([#1966](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1966)) ([0269d38](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0269d38fcae2b1ca566427159d33910fe96299a7))
+
+### [18.14.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.14.0...v18.14.1) (2022-03-24)
+
+
+### Bug Fixes
+
+* Default to cluster version for EKS and self managed node groups when a `cluster_version` is not specified ([#1963](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1963)) ([fd3a3e9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/fd3a3e9a96d9a8fa9b22446e2ac8c36cdf68c5fc))
+
+## [18.14.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.13.0...v18.14.0) (2022-03-24)
+
+
+### Features
+
+* Add tags to EKS created cluster security group to match rest of module tagging scheme ([#1957](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1957)) ([9371a29](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/9371a2943b13cc2d9ceb34aef14ec2ccee1cb721))
+
+## [18.13.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.12.0...v18.13.0) (2022-03-23)
+
+
+### Features
+
+* Allow users to selectively attach the EKS created cluster primary security group to nodes ([#1952](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1952)) ([e21db83](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e21db83d8ff3cd1d3f49acc611931e8917d0b6f8))
+
+## [18.12.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.11.0...v18.12.0) (2022-03-22)
+
+
+### Features
+
+* Add outputs for autoscaling group names created to aid in autoscaling group tagging ([#1953](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1953)) ([8b03b7b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8b03b7b85ef80db5de766827ef65b700317c68e6))
+
+## [18.11.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.10.2...v18.11.0) (2022-03-18)
+
+
+### Features
+
+* Allow users to specify default launch template name in node groups ([#1946](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1946)) ([a9d2cc8](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a9d2cc8246128fc7f426f0b4596c6799ecf94d8a))
+
+### [18.10.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.10.1...v18.10.2) (2022-03-17)
+
+
+### Bug Fixes
+
+* Sub-modules output the correct EKS worker IAM ARN when workers utilize a custom IAM role ([#1912](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1912)) ([06a3469](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/06a3469d203fc4344d5f94564762432b5cfd2043))
+
+### [18.10.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.10.0...v18.10.1) (2022-03-15)
+
+
+### Bug Fixes
+
+* Compact result of cluster security group to avoid disruptive updates when no security groups are supplied ([#1934](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1934)) ([5935670](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/5935670503bba3405b53e49ddd88a6451f534d4a))
+
+## [18.10.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.9.0...v18.10.0) (2022-03-12)
+
+
+### Features
+
+* Made it clear that we stand with Ukraine ([fad350d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/fad350d5bf36a7e39aa3840926b4c9968e9f594c))
+
+## [18.9.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.8.1...v18.9.0) (2022-03-09)
+
+
+### Features
+
+* Add variables to allow users to control attributes on `cluster_encryption` IAM policy ([#1928](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1928)) ([2df1572](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/2df1572b8a031fbd31a845cc5c61f015ec387f56))
+
+### [18.8.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.8.0...v18.8.1) (2022-03-02)
+
+
+### Bug Fixes
+
+* Ensure that cluster encryption policy resources are only relevant when creating the IAM role ([#1917](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1917)) ([0fefca7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0fefca76f2258cee565359e36a4851978602f36d))
+
+## [18.8.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.7.3...v18.8.0) (2022-03-02)
+
+
+### Features
+
+* Add additional IAM policy to allow cluster role to use KMS key provided for cluster encryption ([#1915](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1915)) ([7644952](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7644952131a466ca22ba5b3e62cd988e01eff716))
+
+### [18.7.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.7.2...v18.7.3) (2022-03-02)
+
+
+### Bug Fixes
+
+* Add support for overriding DNS suffix for cluster IAM role service principal endpoint ([#1905](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1905)) ([9af0c24](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/9af0c2495a1fe7a02411ac436f48f6d9ca8b359f))
+
+### [18.7.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.7.1...v18.7.2) (2022-02-16)
+
+
+### Bug Fixes
+
+* Update examples to show integration and usage of new IRSA submodule ([#1882](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1882)) ([8de02b9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8de02b9ff4690d1bbefb86d3441662b16abb03dd))
+
+### [18.7.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.7.0...v18.7.1) (2022-02-15)
+
+
+### Bug Fixes
+
+* Add missing quotes to block_duration_minutes ([#1881](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1881)) ([8bc6488](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8bc6488d559d603b539bc1a9c4eb8c57c529b25e))
+
+## [18.7.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.6.1...v18.7.0) (2022-02-15)
+
+
+### Features
+
+* Add variable to provide additional OIDC thumbprints ([#1865](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1865)) ([3fc9f2d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3fc9f2d69c32a2536aaee45adbe0c3449d7fc986))
+
+### [18.6.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.6.0...v18.6.1) (2022-02-15)
+
+
+### Bug Fixes
+
+* Update autoscaling group `tags` -> `tag` to support v4 of AWS provider ([#1866](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1866)) ([74ad4b0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/74ad4b09b7bbee857c833cb92afe07499356831d))
+
+## [18.6.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.5.1...v18.6.0) (2022-02-11)
+
+
+### Features
+
+* Add additional output for OIDC provider (issuer URL without leading `https://`) ([#1870](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1870)) ([d3b6847](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d3b68479dea49076a36e0c39e8c41407f270dcad))
+
+### [18.5.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.5.0...v18.5.1) (2022-02-09)
+
+
+### Bug Fixes
+
+* Use existing node security group when one is provided ([#1861](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1861)) ([c821ba7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c821ba78ca924273d17e9c3b15eae05dd7fb9c94))
+
+## [18.5.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.4.1...v18.5.0) (2022-02-08)
+
+
+### Features
+
+* Allow conditional creation of node groups to be set within node group definitions ([#1848](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1848)) ([665f468](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/665f468c1f4839836b1cb5fa5f18ebba17696288))
+
+### [18.4.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.4.0...v18.4.1) (2022-02-07)
+
+
+### Bug Fixes
+
+* Add node group dependency for EKS addons resource creation ([#1840](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1840)) ([2515e0e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/2515e0e561509d026fd0d4725ab0bd864e7340f9))
+
+## [18.4.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.3.1...v18.4.0) (2022-02-06)
+
+
+### Features
+
+* enable IRSA by default ([#1849](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1849)) ([21c3802](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/21c3802dea52bf51ab99c322fcfdce554086a794))
+
+### [18.3.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.3.0...v18.3.1) (2022-02-04)
+
+
+### Bug Fixes
+
+* The `block_duration_minutes` attribute under launch template `spot_options` is not required ([#1847](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1847)) ([ccc4747](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ccc4747122b29ac35975e3c89edaa6ee28a86e4a))
+
+## [18.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.7...v18.3.0) (2022-02-03)
+
+
+### Features
+
+* Add launch_template_tags variable for additional launch template tags ([#1835](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1835)) ([9186def](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/9186defcf6ef72502131cffb8b781e1591d2139e))
+
+### [18.2.7](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.6...v18.2.7) (2022-02-02)
+
+
+### Bug Fixes
+
+* Don't tag self managed node security group with kubernetes.io/cluster tag ([#1774](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1774)) ([a638e4a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a638e4a754c15ab230cfb0e91de026e038ca4e26))
+
+### [18.2.6](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.5...v18.2.6) (2022-02-01)
+
+
+### Bug Fixes
+
+* Wrong rolearn in aws_auth_configmap_yaml ([#1820](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1820)) ([776009d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/776009d74b16e97974534668ca01a950d660166a))
+
+### [18.2.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.4...v18.2.5) (2022-02-01)
+
+
+### Bug Fixes
+
+* Correct issue where custom launch template is not used when EKS managed node group is used externally ([#1824](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1824)) ([e16b3c4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e16b3c4cbd5f139d54467965f690e79f8e68b76b))
+
+### [18.2.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.3...v18.2.4) (2022-01-30)
+
+
+### Bug Fixes
+
+* add missing `launch_template_use_name_prefix` parameter to the root module ([#1818](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1818)) ([d6888b5](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d6888b5eb6748a065063b0679f228f9fbbf93284))
+
+### [18.2.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.2...v18.2.3) (2022-01-24)
+
+
+### Bug Fixes
+
+* Add missing `mixed_instances_policy` parameter to the root module ([#1808](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1808)) ([4af77f2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/4af77f244a558ec66db6561488a5d8cd0c0f1aed))
+
+### [18.2.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.1...v18.2.2) (2022-01-22)
+
+
+### Bug Fixes
+
+* Attributes in timeouts are erroneously reversed ([#1804](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1804)) ([f8fe584](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f8fe584d5b50cc4009ac6c34e3bbb33a4e282f2e))
+
+### [18.2.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.2.0...v18.2.1) (2022-01-18)
+
+
+### Bug Fixes
+
+* Change `instance_metadata_tags` to default to `null`/`disabled` due to tag key pattern conflict ([#1788](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1788)) ([8e4dfa2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/8e4dfa2be5c60e98a9b20a8ae716c5c446fe935c))
+
+## [18.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.1.0...v18.2.0) (2022-01-14)
+
+
+### Features
+
+* Add `instance_metadata_tags` attribute to launch templates ([#1781](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1781)) ([85bb1a0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/85bb1a00b6111845141a8c07a9459bbd160d7ed3))
+
+## [18.1.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.6...v18.1.0) (2022-01-14)
+
+
+### Features
+
+* Add support for networking `ip_family` which enables support for IPV6 ([#1759](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1759)) ([314192e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/314192e2ebc5faaf5f027a7d868cd36c4844aee1))
+
+### [18.0.6](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.5...v18.0.6) (2022-01-11)
+
+
+### Bug Fixes
+
+* Correct remote access variable for security groups and add example for additional IAM policies ([#1766](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1766)) ([f54bd30](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f54bd3047ba18179766641e347fe9f4fa60ff11b))
+
+### [18.0.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.4...v18.0.5) (2022-01-08)
+
+
+### Bug Fixes
+
+* Use the prefix_separator var for node sg prefix ([#1751](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1751)) ([62879dd](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/62879dd81a69ba010f19ba9ece8392e1730b53e0))
+
+### [18.0.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.3...v18.0.4) (2022-01-07)
+
+
+### Bug Fixes
+
+* Do not iterate over the remote_access object in dynamic block ([#1743](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1743)) ([86b3c33](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/86b3c339a772e76239f97a9bb1f710199d1bd04a))
+
+### [18.0.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.2...v18.0.3) (2022-01-06)
+
+
+### Bug Fixes
+
+* Remove trailing hyphen from cluster security group and iam role name prefix ([#1745](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1745)) ([7089c71](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7089c71e64dbae281435629e19d647ae6952f9ac))
+
+### [18.0.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.1...v18.0.2) (2022-01-06)
+
+
+### Bug Fixes
+
+* Change variable "node_security_group_additional_rules" from type map(any) to any ([#1747](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1747)) ([8921827](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/89218279d4439110439ca4cb8ac94575ab92b042))
+
+### [18.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v18.0.0...v18.0.1) (2022-01-06)
+
+
+### Bug Fixes
+
+* Correct conditional map for cluster security group additional rules ([#1738](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1738)) ([a2c7caa](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a2c7caac9f01ef167994d8b62afb5f997d0fac66))
+
+## [18.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.24.0...v18.0.0) (2022-01-05)
+
+
+### ⚠ BREAKING CHANGES
+
+* Removed support for launch configuration and replace `count` with `for_each` (#1680)
+
+### Features
+
+* Removed support for launch configuration and replace `count` with `for_each` ([#1680](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1680)) ([ee9f0c6](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ee9f0c646a45ca9baa6174a036d1e09bcccb87b1))
+
+
+### Bug Fixes
+
+* Update preset rule on semantic-release to use conventional commits ([#1736](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1736)) ([be86c0b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/be86c0b898c34943e898e2ecd4994bb7904663ff))
+
+# [17.24.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.23.0...v17.24.0) (2021-11-22)
+
+
+### Bug Fixes
+
+* Added Deny for CreateLogGroup action in EKS cluster role ([#1594](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1594)) ([6959b9b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6959b9bae32309357bc97a85a1f09c7b590c8a6d))
+* update CI/CD process to enable auto-release workflow ([#1698](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1698)) ([b876ff9](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b876ff95136fbb419cbb33feaa8f354a053047e0))
+
+
+### Features
+
+* Add ability to define custom timeout for fargate profiles ([#1614](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1614)) ([b7539dc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/b7539dc220f6b5fe199d67569b6f3619ec00fdf0))
+* Removed ng_depends_on variable and related hack ([#1672](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1672)) ([56e93d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/56e93d77de58f311f1d1d7051f40bf77e7b03524))
+
+<a name="v17.23.0"></a>
+## [v17.23.0] - 2021-11-02
+FEATURES:
+- Added support for client.authentication.k8s.io/v1beta1 ([#1550](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1550))
+- Improve managed node group bootstrap revisited ([#1577](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1577))
+
+BUG FIXES:
+- Fixed variable reference for snapshot_id ([#1634](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1634))
+
+
+<a name="v17.22.0"></a>
+## [v17.22.0] - 2021-10-14
+BUG FIXES:
+- MNG cluster datasource errors ([#1639](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1639))
+
+
+<a name="v17.21.0"></a>
+## [v17.21.0] - 2021-10-12
+FEATURES:
+- Fix custom AMI bootstrap ([#1580](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1580))
+- Enable throughput & iops configs for managed node_groups ([#1584](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1584))
+- Allow snapshot_id to be specified for additional_ebs_volumes ([#1431](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1431))
+- Allow interface_type to be specified in worker_groups_launch_template ([#1439](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1439))
+
+BUG FIXES:
+- Rebuild examples ([#1625](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1625))
+- Bug with data source in managed groups submodule ([#1633](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1633))
+- Fixed launch_templates_with_managed_node_group example ([#1599](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1599))
+
+DOCS:
+- Update iam-permissions.md ([#1613](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1613))
+- Updated iam-permissions.md ([#1612](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1612))
+- Updated faq about desired count of instances in node and worker groups ([#1604](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1604))
+- Update faq about endpoints ([#1603](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1603))
+- Fix broken URL in README ([#1602](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1602))
+- Remove `asg_recreate_on_change` in faq ([#1596](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1596))
+
+
+<a name="v17.20.0"></a>
+## [v17.20.0] - 2021-09-17
+FEATURES:
+- Ability to specify cluster update timeout ([#1588](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1588))
+
+
+<a name="v17.19.0"></a>
+## [v17.19.0] - 2021-09-16
+REFACTORS:
+- Refactoring to match the rest of terraform-aws-modules ([#1583](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1583))
+
+
+<a name="v17.18.0"></a>
+## [v17.18.0] - 2021-09-08
+FEATURES:
+- Add metadata_options for node_groups ([#1485](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1485))
+
+
+<a name="v17.17.0"></a>
+## [v17.17.0] - 2021-09-08
+FEATURES:
+- Added custom AMI support for managed node groups ([#1473](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1473))
+
+
+<a name="v17.16.0"></a>
+## [v17.16.0] - 2021-09-08
+BUG FIXES:
+- Fixed coalescelist() with subnets in fargate module ([#1576](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1576))
+
+
+<a name="v17.15.0"></a>
+## [v17.15.0] - 2021-09-06
+FEATURES:
+- Added ability to pass different subnets for fargate and the cluster ([#1527](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1527))
+
+
+<a name="v17.14.0"></a>
+## [v17.14.0] - 2021-09-06
+FEATURES:
+- Create SG rule for each new cluster_endpoint_private_access_cidr block ([#1549](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1549))
+
+
+<a name="v17.13.0"></a>
+## [v17.13.0] - 2021-09-06
+BUG FIXES:
+- Worker security group handling when worker_create_security_group=false ([#1461](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1461))
+
+
+<a name="v17.12.0"></a>
+## [v17.12.0] - 2021-09-06
+FEATURES:
+- Add ability to tag network-interface using Launch Template ([#1563](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1563))
+
+
+<a name="v17.11.0"></a>
+## [v17.11.0] - 2021-09-04
+BUG FIXES:
+- Updated required version of AWS provider to 3.56.0 ([#1571](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1571))
+
+
+<a name="v17.10.0"></a>
+## [v17.10.0] - 2021-09-03
+FEATURES:
+- Added support for update_config in EKS managed node groups ([#1560](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1560))
+
+
+<a name="v17.9.0"></a>
+## [v17.9.0] - 2021-09-03
+FEATURES:
+- Allow override of timeouts in node_groups ([#1552](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1552))
+- Ability to tag just EKS cluster ([#1569](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1569))
+
+
+<a name="v17.8.0"></a>
+## [v17.8.0] - 2021-09-03
+BUG FIXES:
+- Put KubeletExtraArgs in double quotes for Windows ([#1082](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1082))
+
+
+<a name="v17.7.0"></a>
+## [v17.7.0] - 2021-09-02
+FEATURES:
+- Added throughput support for root and EBS disks ([#1445](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1445))
+
+
+<a name="v17.6.0"></a>
+## [v17.6.0] - 2021-08-31
+FEATURES:
+- Tags passed into worker_groups_launch_template extend var.tags for the volumes ([#1397](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1397))
+
+
+<a name="v17.5.0"></a>
+## [v17.5.0] - 2021-08-31
+FEATURES:
+- Allow users to add more Audiences to OpenID Connect ([#1451](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1451))
+
+
+<a name="v17.4.0"></a>
+## [v17.4.0] - 2021-08-27
+BUG FIXES:
+- Discourage usage of iam_policy_attachment in example ([#1529](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1529))
+- Allow instance `Name` tag to be overwritten ([#1538](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1538))
+
+DOCS:
+- Fix cluster-autoscaler tags in irsa example ([#1436](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1436))
+- Add missing comma to docs/iam-permissions.md ([#1437](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1437))
+- Updated autoscaling.md ([#1515](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1515))
+
+
+<a name="v17.3.0"></a>
+## [v17.3.0] - 2021-08-25
+BUG FIXES:
+- Fixed launch template version infinite plan issue and improved rolling updates ([#1447](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1447))
+
+
+<a name="v17.2.0"></a>
+## [v17.2.0] - 2021-08-25
+FEATURES:
+- Support for encrypted root disk in node_groups ([#1428](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1428))
+- Enable ebs_optimized setting for node_groups ([#1459](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1459))
 
 
 <a name="v17.1.0"></a>
-## [v17.1.0] - 2021-06-04
+## [v17.1.0] - 2021-06-09
 FEATURES:
 - Add support for Managed Node Groups (`node_groups`) taints ([#1424](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1424))
 - Allow to choose launch template version for Managed Node Groups when `create_launch_template` is set to `true` ([#1419](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1419))
@@ -67,7 +1645,7 @@ BREAKING CHANGES:
 - The  variable `config_output_path` is renamed into `kubeconfig_output_path` for naming consistency. Please upgrade your configuration accordingly.
 
 NOTES:
-- Since we now search only for Linux or Windows AMI if there is a worker groups for the corresponding plateform, we can now define different default root block device name for each plateform. Use locals `root_block_device_name` and `root_block_device_name_windows` to define your owns.
+- Since we now search only for a Linux or Windows AMI when there are worker groups for the corresponding platform, we can now define a different default root block device name for each platform. Use the locals `root_block_device_name` and `root_block_device_name_windows` to define your own.
 - The kubeconfig file permission is not world and group readable anymore. The default permission is now `600`. This value can be changed with the variable `var.kubeconfig_file_permission`.
 
 
@@ -106,7 +1684,7 @@ BUG FIXES:
 - Bump `terraform-aws-modules/http` provider version to support darwin arm64 release ([#1369](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1369))
 
 DOCS:
-- Use IRSA for Node Termination Handler IAM policy attachement in Instance Refresh example ([#1373](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1373))
+- Use IRSA for Node Termination Handler IAM policy attachment in Instance Refresh example ([#1373](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1373))
 
 
 <a name="v16.0.0"></a>
@@ -179,7 +1757,7 @@ FEATURES:
 - Add permissions boundary to fargate execution IAM role ([#1108](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1108))
 
 ENHANCEMENTS:
-- Dont set -x in userdata to avoid printing sensitive informations in logs ([#1187](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1187))
+- Don't set -x in userdata to avoid printing sensitive information in logs ([#1187](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1187))
 
 BUG FIXES:
 - Merge tags from Fargate profiles with common tags from cluster ([#1159](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1159))
@@ -291,7 +1869,7 @@ BREAKING CHANGES:
 NOTES:
 - `credit_specification` for worker groups launch template can now be set to `null` so that we can use non burstable EC2 families
 - Starting in v12.1.0 the `cluster_id` output depends on the
-`wait_for_cluster` null resource. This means that initialisation of the
+`wait_for_cluster` null resource. This means that initialization of the
 kubernetes provider will be blocked until the cluster is really ready,
 if the module is set to manage the aws_auth ConfigMap and user followed
 the typical Usage Example. kubernetes resources in the same plan do not
@@ -397,10 +1975,32 @@ TESTS:
 - Remove unused kitchen test related stuff ([#787](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/787))
 
 CI:
-- Restrict sementic PR to validate PR title only ([#804](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/804))
-
-
-[Unreleased]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.1.0...HEAD
+- Restrict semantic PR to validate PR title only ([#804](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/804))
+
+
+[Unreleased]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.23.0...HEAD
+[v17.23.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.22.0...v17.23.0
+[v17.22.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.21.0...v17.22.0
+[v17.21.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.20.0...v17.21.0
+[v17.20.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.19.0...v17.20.0
+[v17.19.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.18.0...v17.19.0
+[v17.18.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.17.0...v17.18.0
+[v17.17.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.16.0...v17.17.0
+[v17.16.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.15.0...v17.16.0
+[v17.15.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.14.0...v17.15.0
+[v17.14.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.13.0...v17.14.0
+[v17.13.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.12.0...v17.13.0
+[v17.12.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.11.0...v17.12.0
+[v17.11.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.10.0...v17.11.0
+[v17.10.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.9.0...v17.10.0
+[v17.9.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.8.0...v17.9.0
+[v17.8.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.7.0...v17.8.0
+[v17.7.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.6.0...v17.7.0
+[v17.6.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.5.0...v17.6.0
+[v17.5.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.4.0...v17.5.0
+[v17.4.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.3.0...v17.4.0
+[v17.3.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.2.0...v17.3.0
+[v17.2.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.1.0...v17.2.0
 [v17.1.0]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.0.3...v17.1.0
 [v17.0.3]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.0.2...v17.0.3
 [v17.0.2]: https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v17.0.1...v17.0.2
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 5120cc143a..0000000000
--- a/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-.PHONY: changelog release
-
-SEMTAG=tools/semtag
-
-CHANGELOG_FILE=CHANGELOG.md
-TAG_QUERY=v11.0.0..
-
-scope ?= "minor"
-
-changelog-unrelease:
-	git-chglog --no-case -o $(CHANGELOG_FILE) $(TAG_QUERY)
-
-changelog:
-	git-chglog --no-case -o $(CHANGELOG_FILE) --next-tag `$(SEMTAG) final -s $(scope) -o -f` $(TAG_QUERY)
-
-release:
-	$(SEMTAG) final -s $(scope)
diff --git a/README.md b/README.md
index 45b06aaeba..fec86afc14 100644
--- a/README.md
+++ b/README.md
@@ -1,327 +1,538 @@
-# terraform-aws-eks
+# AWS EKS Terraform module
 
-[![Lint Status](https://github.com/terraform-aws-modules/terraform-aws-eks/workflows/Lint/badge.svg)](https://github.com/terraform-aws-modules/terraform-aws-eks/actions)
-[![LICENSE](https://img.shields.io/github/license/terraform-aws-modules/terraform-aws-eks)](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/LICENSE)
+Terraform module which creates Amazon EKS (Kubernetes) resources
 
-A terraform module to create a managed Kubernetes cluster on AWS EKS. Available
-through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws).
-Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html)
-and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started).
-Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html).
+[![SWUbanner](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg)](https://github.com/vshymanskyy/StandWithUkraine/blob/main/docs/README.md)
 
-## Assumptions
+## [Documentation](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs)
 
-* You want to create an EKS cluster and an autoscaling group of workers for the cluster.
-* You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
-* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources. The VPC satisfies [EKS requirements](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html).
+- [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md)
+- [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md)
+- [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md)
+- [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md)
+- Upgrade Guides
+  - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md)
+  - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md)
+  - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md)
+  - [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md)
 
-## Important note
+### External Documentation
 
-The `cluster_version` is the required variable. Kubernetes is evolving a lot, and each major version includes new features, fixes, or changes.
+Please note that we strive to provide a comprehensive suite of documentation for __*configuring and utilizing the module(s)*__ defined here, and that documentation regarding EKS (including EKS managed node group, self managed node group, and Fargate profile) and/or Kubernetes features, usage, etc. is better left up to their respective sources:
 
-**Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version.**
+- [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
+- [Kubernetes Documentation](https://kubernetes.io/docs/home/)
 
-You also need to ensure your applications and add ons are updated, or workloads could fail after the upgrade is complete. For action, you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html).
+## Usage
 
-An example of harming update was the removal of several commonly used, but deprecated  APIs, in Kubernetes 1.16. More information on the API removals, see the [Kubernetes blog post](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/).
+### EKS Auto Mode
 
-By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the following [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS creation is `ACTIVE` but not ready. We implemented a "retry" logic with a fork of the http provider https://github.com/terraform-aws-modules/terraform-provider-http. This fork adds the support of a self-signed CA certificate. The original PR can be found at https://github.com/hashicorp/terraform-provider-http/pull/29.
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
 
-Setting `instance_refresh_enabled` to true will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example here [instance_refresh](examples/instance_refresh).
+  name               = "example"
+  kubernetes_version = "1.33"
 
-## Usage example
+  # Optional
+  endpoint_public_access = true
 
-A full example leveraging other community modules is contained in the [examples/basic directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
+  # Optional: Adds the current caller identity as an administrator via cluster access entry
+  enable_cluster_creator_admin_permissions = true
 
-```hcl
-data "aws_eks_cluster" "cluster" {
-  name = module.my-cluster.cluster_id
-}
+  compute_config = {
+    enabled    = true
+    node_pools = ["general-purpose"]
+  }
 
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.my-cluster.cluster_id
-}
+  vpc_id     = "vpc-1234556abcdef"
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
 
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-  version                = "~> 1.9"
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
 }
+```
+
+### EKS Managed Node Group
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "my-cluster"
+  kubernetes_version = "1.33"
+
+  addons = {
+    coredns                = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy             = {}
+    vpc-cni                = {
+      before_compute = true
+    }
+  }
+
+  # Optional
+  endpoint_public_access = true
+
+  # Optional: Adds the current caller identity as an administrator via cluster access entry
+  enable_cluster_creator_admin_permissions = true
+
+  vpc_id                   = "vpc-1234556abcdef"
+  subnet_ids               = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+  control_plane_subnet_ids = ["subnet-xyzde987", "subnet-slkjf456", "subnet-qeiru789"]
+
+  # EKS Managed Node Group(s)
+  eks_managed_node_groups = {
+    example = {
+      # Starting on 1.30, AL2023 is the default AMI type for EKS managed node groups
+      ami_type       = "AL2023_x86_64_STANDARD"
+      instance_types = ["m5.xlarge"]
 
-module "my-cluster" {
-  source          = "terraform-aws-modules/eks/aws"
-  cluster_name    = "my-cluster"
-  cluster_version = "1.17"
-  subnets         = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
-  vpc_id          = "vpc-1234556abcdef"
-
-  worker_groups = [
-    {
-      instance_type = "m4.large"
-      asg_max_size  = 5
+      min_size     = 2
+      max_size     = 10
+      desired_size = 2
     }
-  ]
+  }
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
 }
 ```
-## Conditional creation
 
-Sometimes you need to have a way to create EKS resources conditionally but Terraform does not allow to use `count` inside `module` block, so the solution is to specify argument `create_eks`.
+### Cluster Access Entry
+
+When enabling `authentication_mode = "API_AND_CONFIG_MAP"`, EKS will automatically create an access entry for the IAM role(s) used by managed node group(s) and Fargate profile(s); no additional action is required by users. For self-managed node groups and the Karpenter sub-module, this project adds the access entry on behalf of users, so again no additional action is required.
 
-Using this feature _and_ having `manage_aws_auth=true` (the default) requires to set up the kubernetes provider in a way that allows the data sources to not exist.
+On clusters that were created prior to cluster access management (CAM) support, there will be an existing access entry for the cluster creator. This was previously not visible when using the `aws-auth` ConfigMap, but becomes visible when access entries are enabled.
 
 ```hcl
-data "aws_eks_cluster" "cluster" {
-  count = var.create_eks ? 1 : 0
-  name  = module.eks.cluster_id
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  # Truncated for brevity ...
+
+  access_entries = {
+    # One access entry with a policy associated
+    example = {
+      principal_arn = "arn:aws:iam::123456789012:role/something"
+
+      policy_associations = {
+        example = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+          access_scope = {
+            namespaces = ["default"]
+            type       = "namespace"
+          }
+        }
+      }
+    }
+  }
 }
+```
+
+### EKS Hybrid Nodes
 
-data "aws_eks_cluster_auth" "cluster" {
-  count = var.create_eks ? 1 : 0
-  name  = module.eks.cluster_id
+```hcl
+locals {
+  # RFC 1918 IP ranges supported
+  remote_network_cidr = "172.16.0.0/16"
+  remote_node_cidr    = cidrsubnet(local.remote_network_cidr, 2, 0)
+  remote_pod_cidr     = cidrsubnet(local.remote_network_cidr, 2, 1)
 }
 
-# In case of not creating the cluster, this will be an incompletely configured, unused provider, which poses no problem.
-provider "kubernetes" {
-  host                   = element(concat(data.aws_eks_cluster.cluster[*].endpoint, [""]), 0)
-  cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority.0.data, [""]), 0))
-  token                  = element(concat(data.aws_eks_cluster_auth.cluster[*].token, [""]), 0)
-  load_config_file       = false
-  version                = "1.10"
+# SSM and IAM Roles Anywhere supported - SSM is default
+module "eks_hybrid_node_role" {
+  source  = "terraform-aws-modules/eks/aws//modules/hybrid-node-role"
+  version = "~> 21.0"
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
 }
 
-# This cluster will not be created
 module "eks" {
-  source = "terraform-aws-modules/eks/aws"
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "example"
+  kubernetes_version = "1.33"
+
+  addons = {
+    coredns                = {}
+    eks-pod-identity-agent = {}
+    kube-proxy             = {}
+  }
+
+  # Optional
+  endpoint_public_access = true
+
+  # Optional: Adds the current caller identity as an administrator via cluster access entry
+  enable_cluster_creator_admin_permissions = true
+
+  create_node_security_group = false
+  security_group_additional_rules = {
+    hybrid-all = {
+      cidr_blocks = [local.remote_network_cidr]
+      description = "Allow all traffic from remote node/pod network"
+      from_port   = 0
+      to_port     = 0
+      protocol    = "all"
+      type        = "ingress"
+    }
+  }
+
+  # Optional
+  compute_config = {
+    enabled    = true
+    node_pools = ["system"]
+  }
+
+  access_entries = {
+    hybrid-node-role = {
+      principal_arn = module.eks_hybrid_node_role.arn
+      type          = "HYBRID_LINUX"
+    }
+  }
+
+  vpc_id     = "vpc-1234556abcdef"
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+  remote_network_config = {
+    remote_node_networks = {
+      cidrs = [local.remote_node_cidr]
+    }
+    # Required if running webhooks on Hybrid nodes
+    remote_pod_networks = {
+      cidrs = [local.remote_pod_cidr]
+    }
+  }
 
-  create_eks = false
-  # ... omitted
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
 }
 ```
 
-## Other documentation
+### Bootstrap Cluster Creator Admin Permissions
 
-* [Autoscaling](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/autoscaling.md): How to enable worker node autoscaling.
-* [Enable Docker Bridge Network](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/enable-docker-bridge-network.md): How to enable the docker bridge network when using the EKS-optimized AMI, which disables it by default.
-* [Spot instances](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/spot-instances.md): How to use spot instances with this module.
-* [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to setup EKS Cluster.
-* [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions
+Setting `bootstrap_cluster_creator_admin_permissions` is a one-time operation when the cluster is created; it cannot be modified later through the EKS API. In this project we hardcode it to `false`. If users wish to achieve the same functionality, the module provides it through an access entry that can be enabled or disabled at any time using the variable `enable_cluster_creator_admin_permissions`.
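+
+As a minimal sketch (assuming a cluster configured like the examples above), enabling this looks roughly like:
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  # Truncated for brevity ...
+
+  # Adds the current caller identity as an administrator via a cluster access
+  # entry; unlike the bootstrap flag, this can be toggled on or off at any time
+  enable_cluster_creator_admin_permissions = true
+}
+```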
 
-## Doc generation
+### Enabling EFA Support
 
-Code formatting and documentation for variables and outputs is generated using [pre-commit-terraform hooks](https://github.com/antonbabenko/pre-commit-terraform) which uses [terraform-docs](https://github.com/segmentio/terraform-docs).
+When enabling EFA support via `enable_efa_support = true`, there are two locations where this can be specified: one at the cluster level, and one at the node group level. Enabling at the cluster level will add the EFA required ingress/egress rules to the shared security group created for the node group(s). Enabling at the node group level will do the following (per node group where enabled):
 
-Follow [these instructions](https://github.com/antonbabenko/pre-commit-terraform#how-to-install) to install pre-commit locally.
+1. All EFA interfaces supported by the instance will be exposed on the launch template used by the node group
+2. A placement group with `strategy = "clustered"` per EFA requirements is created and passed to the launch template used by the node group
+3. Data sources will reverse lookup the availability zones that support the instance type selected based on the subnets provided, ensuring that only the associated subnets are passed to the launch template and therefore used by the placement group. This avoids the placement group being created in an availability zone that does not support the instance type selected.
 
-And install `terraform-docs` with `go get github.com/segmentio/terraform-docs` or `brew install terraform-docs`.
+> [!TIP]
+> Use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request the interfaces be mounted to their containers.
+>
+> The EKS AL2 GPU AMI comes with the necessary EFA components pre-installed - you just need to expose the EFA devices on the nodes via their launch templates, ensure the required EFA security group rules are in place, and deploy the `aws-efa-k8s-device-plugin` in order to start utilizing EFA within your cluster. Your application container will need to have the necessary libraries and runtime in order to utilize communication over the EFA interfaces (NCCL, aws-ofi-nccl, hwloc, libfabric, aws-neuronx-collectives, CUDA, etc.).
 
-## Contributing
+If you disable the creation and use of the managed node group custom launch template (`create_launch_template = false` and/or `use_custom_launch_template = false`), this will interfere with the EFA functionality provided. In addition, if you do not supply an `instance_type` for self-managed node group(s), or `instance_types` for the managed node group(s), this will also interfere with the functionality. In order to support the EFA functionality provided by `enable_efa_support = true`, you must utilize the custom launch template created/provided by this module, and supply an `instance_type`/`instance_types` for the respective node group.
 
-Report issues/questions/feature requests on in the [issues](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/new) section.
+The logic behind supporting EFA uses a data source to look up the instance type and retrieve the number of interfaces that the instance supports in order to enumerate and expose those interfaces on the launch template created. For managed node groups where a list of instance types is supported, the first instance type in the list is used to calculate the number of EFA interfaces supported. Mixing instance types with varying numbers of interfaces is not recommended for EFA (or in some cases not supported, e.g. p5.48xlarge and p4d.24xlarge). In addition to exposing the EFA interfaces and updating the security group rules, a placement group is created per the EFA requirements, and only the availability zones that support the selected instance type are used in the subnets provided to the node group.
 
-Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
+In order to enable EFA support, you will have to specify `enable_efa_support = true` on both the cluster and each node group that you wish to enable EFA support for:
 
-## Change log
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  # Truncated for brevity ...
+
+  # Adds the EFA required security group rules to the shared
+  # security group created for the node group(s)
+  enable_efa_support = true
+
+  eks_managed_node_groups = {
+    example = {
+      # The EKS AL2023 NVIDIA AMI provides all of the necessary components
+      # for accelerated workloads w/ EFA
+      ami_type       = "AL2023_x86_64_NVIDIA"
+      instance_types = ["p5.48xlarge"]
+
+      # Exposes all EFA interfaces on the launch template created by the node group(s)
+      # This would expose all 32 EFA interfaces for the p5.48xlarge instance type
+      enable_efa_support = true
+
+      # Mount instance store volumes in RAID-0 for kubelet and containerd
+      # https://github.com/awslabs/amazon-eks-ami/blob/master/doc/USER_GUIDE.md#raid-0-for-kubelet-and-containerd-raid0
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              instance:
+                localStorage:
+                  strategy: RAID0
+          EOT
+        }
+      ]
+
+      # EFA should only be enabled when connecting 2 or more nodes
+      # Do not use EFA on a single node workload
+      min_size     = 2
+      max_size     = 10
+      desired_size = 2
+    }
+  }
+}
+```
 
-- The [changelog](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) captures all important release notes from v11.0.0
-- For older release notes, refer to [changelog.pre-v11.0.0.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.pre-v11.0.0.md)
+## Examples
 
-## Authors
+- [EKS Auto Mode](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks-auto-mode): EKS Cluster with EKS Auto Mode
+- [EKS Hybrid Nodes](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks-hybrid-nodes): EKS Cluster with EKS Hybrid nodes
+- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks-managed-node-group): EKS Cluster with EKS managed node group(s)
+- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management
+- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self-managed-node-group): EKS Cluster with self-managed node group(s)
 
-Created by [Brandon O'Connor](https://github.com/brandoconnor) - brandon@atscale.run.
-Maintained by [Max Williams](https://github.com/max-rocket-internet) and [Thierno IB. BARRY](https://github.com/barryib).
-Many thanks to [the contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)!
+## Contributing
 
-## License
+We are grateful to the community for contributing bugfixes and improvements! Please see below to learn how you can take part.
 
-Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
+- [Code of Conduct](https://github.com/terraform-aws-modules/.github/blob/master/CODE_OF_CONDUCT.md)
+- [Contributing Guide](https://github.com/terraform-aws-modules/.github/blob/master/CONTRIBUTING.md)
 
-<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
+<!-- BEGIN_TF_DOCS -->
 ## Requirements
 
 | Name | Version |
 |------|---------|
-| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
-| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.40.0 |
-| <a name="requirement_http"></a> [http](#requirement\_http) | >= 2.4.1 |
-| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
-| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+| <a name="requirement_time"></a> [time](#requirement\_time) | >= 0.9 |
+| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 4.0 |
 
 ## Providers
 
 | Name | Version |
 |------|---------|
-| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.40.0 |
-| <a name="provider_http"></a> [http](#provider\_http) | >= 2.4.1 |
-| <a name="provider_kubernetes"></a> [kubernetes](#provider\_kubernetes) | >= 1.11.1 |
-| <a name="provider_local"></a> [local](#provider\_local) | >= 1.4 |
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+| <a name="provider_time"></a> [time](#provider\_time) | >= 0.9 |
+| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 4.0 |
 
 ## Modules
 
 | Name | Source | Version |
 |------|--------|---------|
-| <a name="module_fargate"></a> [fargate](#module\_fargate) | ./modules/fargate |  |
-| <a name="module_node_groups"></a> [node\_groups](#module\_node\_groups) | ./modules/node_groups |  |
+| <a name="module_eks_managed_node_group"></a> [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a |
+| <a name="module_fargate_profile"></a> [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a |
+| <a name="module_kms"></a> [kms](#module\_kms) | terraform-aws-modules/kms/aws | 4.0.0 |
+| <a name="module_self_managed_node_group"></a> [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a |
 
 ## Resources
 
 | Name | Type |
 |------|------|
-| [aws_autoscaling_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
-| [aws_autoscaling_group.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
 | [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
+| [aws_ec2_tag.cluster_primary_security_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_tag) | resource |
+| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource |
+| [aws_eks_access_policy_association.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_policy_association) | resource |
+| [aws_eks_addon.before_compute](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource |
+| [aws_eks_addon.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource |
 | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_iam_instance_profile.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
-| [aws_iam_instance_profile.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_eks_identity_provider_config.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_identity_provider_config) | resource |
 | [aws_iam_openid_connect_provider.oidc_provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.workers_additional_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_launch_configuration.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration) | resource |
-| [aws_launch_template.workers_launch_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_iam_policy.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.custom](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.eks_auto](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.cluster_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.custom](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.eks_auto](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.eks_auto_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
 | [aws_security_group.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group_rule.cluster_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_https_worker_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_primary_ingress_workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_cidrs_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.cluster_private_access_sg_source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_egress_internet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_kubelet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_cluster_primary](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [aws_security_group_rule.workers_ingress_self](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
-| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
-| [local_file.kubeconfig](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
-| [aws_ami.eks_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
-| [aws_ami.eks_worker_windows](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [time_sleep.this](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource |
 | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_iam_instance_profile.custom_worker_group_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
-| [aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_instance_profile) | data source |
-| [aws_iam_policy_document.cluster_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_elb_sl_role_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.workers_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_cluster_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
+| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.custom](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.node_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_session_context.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_session_context) | data source |
 | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [http_http.wait_for_cluster](https://registry.terraform.io/providers/terraform-aws-modules/http/latest/docs/data-sources/http) | data source |
+| [tls_certificate.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
 
 ## Inputs
 
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| <a name="input_attach_worker_cni_policy"></a> [attach\_worker\_cni\_policy](#input\_attach\_worker\_cni\_policy) | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster. | `bool` | `true` | no |
-| <a name="input_aws_auth_additional_labels"></a> [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
-| <a name="input_cluster_create_endpoint_private_access_sg_rule"></a> [cluster\_create\_endpoint\_private\_access\_sg\_rule](#input\_cluster\_create\_endpoint\_private\_access\_sg\_rule) | Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted. | `bool` | `false` | no |
-| <a name="input_cluster_create_security_group"></a> [cluster\_create\_security\_group](#input\_cluster\_create\_security\_group) | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | `bool` | `true` | no |
-| <a name="input_cluster_create_timeout"></a> [cluster\_create\_timeout](#input\_cluster\_create\_timeout) | Timeout value when creating the EKS cluster. | `string` | `"30m"` | no |
-| <a name="input_cluster_delete_timeout"></a> [cluster\_delete\_timeout](#input\_cluster\_delete\_timeout) | Timeout value when deleting the EKS cluster. | `string` | `"15m"` | no |
-| <a name="input_cluster_egress_cidrs"></a> [cluster\_egress\_cidrs](#input\_cluster\_egress\_cidrs) | List of CIDR blocks that are permitted for cluster egress traffic. | `list(string)` | <pre>[<br>  "0.0.0.0/0"<br>]</pre> | no |
-| <a name="input_cluster_enabled_log_types"></a> [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | `[]` | no |
-| <a name="input_cluster_encryption_config"></a> [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster. See examples/secrets\_encryption/main.tf for example format | <pre>list(object({<br>    provider_key_arn = string<br>    resources        = list(string)<br>  }))</pre> | `[]` | no |
-| <a name="input_cluster_endpoint_private_access"></a> [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled. | `bool` | `false` | no |
-| <a name="input_cluster_endpoint_private_access_cidrs"></a> [cluster\_endpoint\_private\_access\_cidrs](#input\_cluster\_endpoint\_private\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| <a name="input_cluster_endpoint_private_access_sg"></a> [cluster\_endpoint\_private\_access\_sg](#input\_cluster\_endpoint\_private\_access\_sg) | List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`. | `list(string)` | `null` | no |
-| <a name="input_cluster_endpoint_public_access"></a> [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`. | `bool` | `true` | no |
-| <a name="input_cluster_endpoint_public_access_cidrs"></a> [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint. | `list(string)` | <pre>[<br>  "0.0.0.0/0"<br>]</pre> | no |
-| <a name="input_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | IAM role name for the cluster. If manage\_cluster\_iam\_resources is set to false, set this to reuse an existing IAM role. If manage\_cluster\_iam\_resources is set to true, set this to force the created role name. | `string` | `""` | no |
-| <a name="input_cluster_log_kms_key_id"></a> [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
-| <a name="input_cluster_log_retention_in_days"></a> [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no |
-| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. Also used as a prefix in names of related resources. | `string` | n/a | yes |
-| <a name="input_cluster_security_group_id"></a> [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no |
-| <a name="input_cluster_service_ipv4_cidr"></a> [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no |
-| <a name="input_cluster_version"></a> [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for the EKS cluster. | `string` | n/a | yes |
-| <a name="input_create_eks"></a> [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| <a name="input_create_fargate_pod_execution_role"></a> [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no |
-| <a name="input_eks_oidc_root_ca_thumbprint"></a> [eks\_oidc\_root\_ca\_thumbprint](#input\_eks\_oidc\_root\_ca\_thumbprint) | Thumbprint of Root CA for EKS OIDC, Valid until 2037 | `string` | `"9e99a48a9960b14926bb7f3b02e22da2b0ab7280"` | no |
-| <a name="input_enable_irsa"></a> [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no |
-| <a name="input_fargate_pod_execution_role_name"></a> [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
-| <a name="input_fargate_profiles"></a> [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details | `any` | `{}` | no |
-| <a name="input_iam_path"></a> [iam\_path](#input\_iam\_path) | If provided, all IAM roles will be created on this path. | `string` | `"/"` | no |
-| <a name="input_kubeconfig_aws_authenticator_additional_args"></a> [kubeconfig\_aws\_authenticator\_additional\_args](#input\_kubeconfig\_aws\_authenticator\_additional\_args) | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | `list(string)` | `[]` | no |
-| <a name="input_kubeconfig_aws_authenticator_command"></a> [kubeconfig\_aws\_authenticator\_command](#input\_kubeconfig\_aws\_authenticator\_command) | Command to use to fetch AWS EKS credentials. | `string` | `"aws-iam-authenticator"` | no |
-| <a name="input_kubeconfig_aws_authenticator_command_args"></a> [kubeconfig\_aws\_authenticator\_command\_args](#input\_kubeconfig\_aws\_authenticator\_command\_args) | Default arguments passed to the authenticator command. Defaults to [token -i $cluster\_name]. | `list(string)` | `[]` | no |
-| <a name="input_kubeconfig_aws_authenticator_env_variables"></a> [kubeconfig\_aws\_authenticator\_env\_variables](#input\_kubeconfig\_aws\_authenticator\_env\_variables) | Environment variables that should be used when executing the authenticator. e.g. { AWS\_PROFILE = "eks"}. | `map(string)` | `{}` | no |
-| <a name="input_kubeconfig_file_permission"></a> [kubeconfig\_file\_permission](#input\_kubeconfig\_file\_permission) | File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.` | `string` | `"0600"` | no |
-| <a name="input_kubeconfig_name"></a> [kubeconfig\_name](#input\_kubeconfig\_name) | Override the default name used for items kubeconfig. | `string` | `""` | no |
-| <a name="input_kubeconfig_output_path"></a> [kubeconfig\_output\_path](#input\_kubeconfig\_output\_path) | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | `string` | `"./"` | no |
-| <a name="input_manage_aws_auth"></a> [manage\_aws\_auth](#input\_manage\_aws\_auth) | Whether to apply the aws-auth configmap file. | `bool` | `true` | no |
-| <a name="input_manage_cluster_iam_resources"></a> [manage\_cluster\_iam\_resources](#input\_manage\_cluster\_iam\_resources) | Whether to let the module manage cluster IAM resources. If set to false, cluster\_iam\_role\_name must be specified. | `bool` | `true` | no |
-| <a name="input_manage_worker_iam_resources"></a> [manage\_worker\_iam\_resources](#input\_manage\_worker\_iam\_resources) | Whether to let the module manage worker IAM resources. If set to false, iam\_instance\_profile\_name must be specified for workers. | `bool` | `true` | no |
-| <a name="input_map_accounts"></a> [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | `list(string)` | `[]` | no |
-| <a name="input_map_roles"></a> [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | <pre>list(object({<br>    rolearn  = string<br>    username = string<br>    groups   = list(string)<br>  }))</pre> | `[]` | no |
-| <a name="input_map_users"></a> [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | <pre>list(object({<br>    userarn  = string<br>    username = string<br>    groups   = list(string)<br>  }))</pre> | `[]` | no |
-| <a name="input_node_groups"></a> [node\_groups](#input\_node\_groups) | Map of map of node groups to create. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| <a name="input_node_groups_defaults"></a> [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no |
-| <a name="input_permissions_boundary"></a> [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| <a name="input_subnets"></a> [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | n/a | yes |
-| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only. | `map(string)` | `{}` | no |
-| <a name="input_vpc_id"></a> [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | n/a | yes |
-| <a name="input_wait_for_cluster_timeout"></a> [wait\_for\_cluster\_timeout](#wait\_for\_cluster\_timeout) | Allows for a configurable timeout (in seconds) when waiting for a cluster to come up | `number` | `300` | no |
-| <a name="input_worker_additional_security_group_ids"></a> [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| <a name="input_worker_ami_name_filter"></a> [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| <a name="input_worker_ami_name_filter_windows"></a> [worker\_ami\_name\_filter\_windows](#input\_worker\_ami\_name\_filter\_windows) | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
-| <a name="input_worker_ami_owner_id"></a> [worker\_ami\_owner\_id](#input\_worker\_ami\_owner\_id) | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| <a name="input_worker_ami_owner_id_windows"></a> [worker\_ami\_owner\_id\_windows](#input\_worker\_ami\_owner\_id\_windows) | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"amazon"` | no |
-| <a name="input_worker_create_cluster_primary_security_group_rules"></a> [worker\_create\_cluster\_primary\_security\_group\_rules](#input\_worker\_create\_cluster\_primary\_security\_group\_rules) | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group. | `bool` | `false` | no |
-| <a name="input_worker_create_initial_lifecycle_hooks"></a> [worker\_create\_initial\_lifecycle\_hooks](#input\_worker\_create\_initial\_lifecycle\_hooks) | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
-| <a name="input_worker_create_security_group"></a> [worker\_create\_security\_group](#input\_worker\_create\_security\_group) | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | `bool` | `true` | no |
-| <a name="input_worker_groups"></a> [worker\_groups](#input\_worker\_groups) | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
-| <a name="input_worker_groups_launch_template"></a> [worker\_groups\_launch\_template](#input\_worker\_groups\_launch\_template) | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |
-| <a name="input_worker_security_group_id"></a> [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| <a name="input_worker_sg_ingress_from_port"></a> [worker\_sg\_ingress\_from\_port](#input\_worker\_sg\_ingress\_from\_port) | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | `number` | `1025` | no |
-| <a name="input_workers_additional_policies"></a> [workers\_additional\_policies](#input\_workers\_additional\_policies) | Additional policies to be added to workers | `list(string)` | `[]` | no |
-| <a name="input_workers_egress_cidrs"></a> [workers\_egress\_cidrs](#input\_workers\_egress\_cidrs) | List of CIDR blocks that are permitted for workers egress traffic. | `list(string)` | <pre>[<br>  "0.0.0.0/0"<br>]</pre> | no |
-| <a name="input_workers_group_defaults"></a> [workers\_group\_defaults](#input\_workers\_group\_defaults) | Override default values for target groups. See workers\_group\_defaults\_defaults in local.tf for valid keys. | `any` | `{}` | no |
-| <a name="input_workers_role_name"></a> [workers\_role\_name](#input\_workers\_role\_name) | User defined workers role name. | `string` | `""` | no |
-| <a name="input_write_kubeconfig"></a> [write\_kubeconfig](#input\_write\_kubeconfig) | Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`. | `bool` | `true` | no |
+| <a name="input_access_entries"></a> [access\_entries](#input\_access\_entries) | Map of access entries to add to the cluster | <pre>map(object({<br/>    # Access entry<br/>    kubernetes_groups = optional(list(string))<br/>    principal_arn     = string<br/>    type              = optional(string, "STANDARD")<br/>    user_name         = optional(string)<br/>    tags              = optional(map(string), {})<br/>    # Access policy association<br/>    policy_associations = optional(map(object({<br/>      policy_arn = string<br/>      access_scope = object({<br/>        namespaces = optional(list(string))<br/>        type       = string<br/>      })<br/>    })))<br/>  }))</pre> | `{}` | no |
+| <a name="input_additional_security_group_ids"></a> [additional\_security\_group\_ids](#input\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
+| <a name="input_addons"></a> [addons](#input\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | <pre>map(object({<br/>    name                 = optional(string) # will fall back to map key<br/>    before_compute       = optional(bool, false)<br/>    most_recent          = optional(bool, true)<br/>    addon_version        = optional(string)<br/>    configuration_values = optional(string)<br/>    pod_identity_association = optional(list(object({<br/>      role_arn        = string<br/>      service_account = string<br/>    })))<br/>    preserve                    = optional(bool, true)<br/>    resolve_conflicts_on_create = optional(string, "NONE")<br/>    resolve_conflicts_on_update = optional(string, "OVERWRITE")<br/>    service_account_role_arn    = optional(string)<br/>    timeouts = optional(object({<br/>      create = optional(string)<br/>      update = optional(string)<br/>      delete = optional(string)<br/>    }))<br/>    tags = optional(map(string), {})<br/>  }))</pre> | `null` | no |
+| <a name="input_addons_timeouts"></a> [addons\_timeouts](#input\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | <pre>object({<br/>    create = optional(string)<br/>    update = optional(string)<br/>    delete = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_attach_encryption_policy"></a> [attach\_encryption\_policy](#input\_attach\_encryption\_policy) | Indicates whether or not to attach an additional policy for the cluster IAM role to utilize the encryption key provided | `bool` | `true` | no |
+| <a name="input_authentication_mode"></a> [authentication\_mode](#input\_authentication\_mode) | The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` | `string` | `"API_AND_CONFIG_MAP"` | no |
+| <a name="input_cloudwatch_log_group_class"></a> [cloudwatch\_log\_group\_class](#input\_cloudwatch\_log\_group\_class) | Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS` | `string` | `null` | no |
+| <a name="input_cloudwatch_log_group_kms_key_id"></a> [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
+| <a name="input_cloudwatch_log_group_retention_in_days"></a> [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
+| <a name="input_cloudwatch_log_group_tags"></a> [cloudwatch\_log\_group\_tags](#input\_cloudwatch\_log\_group\_tags) | A map of additional tags to add to the cloudwatch log group created | `map(string)` | `{}` | no |
+| <a name="input_cluster_tags"></a> [cluster\_tags](#input\_cluster\_tags) | A map of additional tags to add to the cluster | `map(string)` | `{}` | no |
+| <a name="input_compute_config"></a> [compute\_config](#input\_compute\_config) | Configuration block for the cluster compute configuration | <pre>object({<br/>    enabled       = optional(bool, false)<br/>    node_pools    = optional(list(string))<br/>    node_role_arn = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_control_plane_subnet_ids"></a> [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no |
+| <a name="input_create"></a> [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no |
+| <a name="input_create_cloudwatch_log_group"></a> [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
+| <a name="input_create_cni_ipv6_iam_policy"></a> [create\_cni\_ipv6\_iam\_policy](#input\_create\_cni\_ipv6\_iam\_policy) | Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy) | `bool` | `false` | no |
+| <a name="input_create_iam_role"></a> [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created for the cluster | `bool` | `true` | no |
+| <a name="input_create_kms_key"></a> [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `true` | no |
+| <a name="input_create_node_iam_role"></a> [create\_node\_iam\_role](#input\_create\_node\_iam\_role) | Determines whether an EKS Auto node IAM role is created | `bool` | `true` | no |
+| <a name="input_create_node_security_group"></a> [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
+| <a name="input_create_primary_security_group_tags"></a> [create\_primary\_security\_group\_tags](#input\_create\_primary\_security\_group\_tags) | Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation | `bool` | `true` | no |
+| <a name="input_create_security_group"></a> [create\_security\_group](#input\_create\_security\_group) | Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default | `bool` | `true` | no |
+| <a name="input_custom_oidc_thumbprints"></a> [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no |
+| <a name="input_dataplane_wait_duration"></a> [dataplane\_wait\_duration](#input\_dataplane\_wait\_duration) | Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed node group(s), self-managed node group(s), Fargate profile(s)) | `string` | `"30s"` | no |
+| <a name="input_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | <pre>map(object({<br/>    create             = optional(bool)<br/>    kubernetes_version = optional(string)<br/><br/>    # EKS Managed Node Group<br/>    name                           = optional(string) # Will fall back to map key<br/>    use_name_prefix                = optional(bool)<br/>    subnet_ids                     = optional(list(string))<br/>    min_size                       = optional(number)<br/>    max_size                       = optional(number)<br/>    desired_size                   = optional(number)<br/>    ami_id                         = optional(string)<br/>    ami_type                       = optional(string)<br/>    ami_release_version            = optional(string)<br/>    use_latest_ami_release_version = optional(bool)<br/>    capacity_type                  = optional(string)<br/>    disk_size                      = optional(number)<br/>    force_update_version           = optional(bool)<br/>    instance_types                 = optional(list(string))<br/>    labels                         = optional(map(string))<br/>    node_repair_config = optional(object({<br/>      enabled = optional(bool)<br/>    }))<br/>    remote_access = optional(object({<br/>      ec2_ssh_key               = optional(string)<br/>      source_security_group_ids = optional(list(string))<br/>    }))<br/>    taints = optional(map(object({<br/>      key    = string<br/>      value  = optional(string)<br/>      effect = string<br/>    })))<br/>    update_config = optional(object({<br/>      max_unavailable            = optional(number)<br/>      max_unavailable_percentage = optional(number)<br/>    }))<br/>    timeouts = optional(object({<br/>      create = optional(string)<br/>      update = optional(string)<br/>      delete = optional(string)<br/>    }))<br/>    # User data<br/>    enable_bootstrap_user_data = optional(bool)<br/>    pre_bootstrap_user_data    = optional(string)<br/>    post_bootstrap_user_data   = optional(string)<br/>    bootstrap_extra_args       = optional(string)<br/>    user_data_template_path    = optional(string)<br/>    cloudinit_pre_nodeadm = optional(list(object({<br/>      content      = string<br/>      content_type = optional(string)<br/>      filename     = optional(string)<br/>      merge_type   = optional(string)<br/>    })))<br/>    cloudinit_post_nodeadm = optional(list(object({<br/>      content      = string<br/>      content_type = optional(string)<br/>      filename     = optional(string)<br/>      merge_type   = optional(string)<br/>    })))<br/>    # Launch Template<br/>    create_launch_template                 = optional(bool)<br/>    use_custom_launch_template             = optional(bool)<br/>    launch_template_id                     = optional(string)<br/>    launch_template_name                   = optional(string) # Will fall back to map key<br/>    launch_template_use_name_prefix        = optional(bool)<br/>    launch_template_version                = optional(string)<br/>    launch_template_default_version        = optional(string)<br/>    update_launch_template_default_version = optional(bool)<br/>    launch_template_description            = optional(string)<br/>    launch_template_tags                   = optional(map(string))<br/>    tag_specifications                     = optional(list(string))<br/>    ebs_optimized                          = optional(bool)<br/>    
key_name                               = optional(string)<br/>    disable_api_termination                = optional(bool)<br/>    kernel_id                              = optional(string)<br/>    ram_disk_id                            = optional(string)<br/>    block_device_mappings = optional(map(object({<br/>      device_name = optional(string)<br/>      ebs = optional(object({<br/>        delete_on_termination      = optional(bool)<br/>        encrypted                  = optional(bool)<br/>        iops                       = optional(number)<br/>        kms_key_id                 = optional(string)<br/>        snapshot_id                = optional(string)<br/>        throughput                 = optional(number)<br/>        volume_initialization_rate = optional(number)<br/>        volume_size                = optional(number)<br/>        volume_type                = optional(string)<br/>      }))<br/>      no_device    = optional(string)<br/>      virtual_name = optional(string)<br/>    })))<br/>    capacity_reservation_specification = optional(object({<br/>      capacity_reservation_preference = optional(string)<br/>      capacity_reservation_target = optional(object({<br/>        capacity_reservation_id                 = optional(string)<br/>        capacity_reservation_resource_group_arn = optional(string)<br/>      }))<br/>    }))<br/>    cpu_options = optional(object({<br/>      amd_sev_snp      = optional(string)<br/>      core_count       = optional(number)<br/>      threads_per_core = optional(number)<br/>    }))<br/>    credit_specification = optional(object({<br/>      cpu_credits = optional(string)<br/>    }))<br/>    enclave_options = optional(object({<br/>      enabled = optional(bool)<br/>    }))<br/>    instance_market_options = optional(object({<br/>      market_type = optional(string)<br/>      spot_options = optional(object({<br/>        block_duration_minutes         = optional(number)<br/>        instance_interruption_behavior = optional(string)<br/>        max_price                      = optional(string)<br/>        spot_instance_type             = optional(string)<br/>        valid_until                    = optional(string)<br/>      }))<br/>    }))<br/>    license_specifications = optional(list(object({<br/>      license_configuration_arn = string<br/>    })))<br/>    metadata_options = optional(object({<br/>      http_endpoint               = optional(string)<br/>      http_protocol_ipv6          = optional(string)<br/>      http_put_response_hop_limit = optional(number)<br/>      http_tokens                 = optional(string)<br/>      instance_metadata_tags      = optional(string)<br/>    }))<br/>    enable_monitoring      = optional(bool)<br/>    enable_efa_support     = optional(bool)<br/>    enable_efa_only        = optional(bool)<br/>    efa_indices            = optional(list(string))<br/>    create_placement_group = optional(bool)<br/>    placement = optional(object({<br/>      affinity                = optional(string)<br/>      availability_zone       = optional(string)<br/>      group_name              = optional(string)<br/>      host_id                 = optional(string)<br/>      host_resource_group_arn = optional(string)<br/>      partition_number        = optional(number)<br/>      spread_domain           = optional(string)<br/>      tenancy                 = optional(string)<br/>    }))<br/>    network_interfaces = optional(list(object({<br/>      associate_carrier_ip_address = optional(bool)<br/>      associate_public_ip_address  = 
optional(bool)<br/>      connection_tracking_specification = optional(object({<br/>        tcp_established_timeout = optional(number)<br/>        udp_stream_timeout      = optional(number)<br/>        udp_timeout             = optional(number)<br/>      }))<br/>      delete_on_termination = optional(bool)<br/>      description           = optional(string)<br/>      device_index          = optional(number)<br/>      ena_srd_specification = optional(object({<br/>        ena_srd_enabled = optional(bool)<br/>        ena_srd_udp_specification = optional(object({<br/>          ena_srd_udp_enabled = optional(bool)<br/>        }))<br/>      }))<br/>      interface_type       = optional(string)<br/>      ipv4_address_count   = optional(number)<br/>      ipv4_addresses       = optional(list(string))<br/>      ipv4_prefix_count    = optional(number)<br/>      ipv4_prefixes        = optional(list(string))<br/>      ipv6_address_count   = optional(number)<br/>      ipv6_addresses       = optional(list(string))<br/>      ipv6_prefix_count    = optional(number)<br/>      ipv6_prefixes        = optional(list(string))<br/>      network_card_index   = optional(number)<br/>      network_interface_id = optional(string)<br/>      primary_ipv6         = optional(bool)<br/>      private_ip_address   = optional(string)<br/>      security_groups      = optional(list(string), [])<br/>      subnet_id            = optional(string)<br/>    })))<br/>    maintenance_options = optional(object({<br/>      auto_recovery = optional(string)<br/>    }))<br/>    private_dns_name_options = optional(object({<br/>      enable_resource_name_dns_aaaa_record = optional(bool)<br/>      enable_resource_name_dns_a_record    = optional(bool)<br/>      hostname_type                        = optional(string)<br/>    }))<br/>    # IAM role<br/>    create_iam_role               = optional(bool)<br/>    iam_role_arn                  = optional(string)<br/>    iam_role_name                 = optional(string)<br/>    iam_role_use_name_prefix      = optional(bool)<br/>    iam_role_path                 = optional(string)<br/>    iam_role_description          = optional(string)<br/>    iam_role_permissions_boundary = optional(string)<br/>    iam_role_tags                 = optional(map(string))<br/>    iam_role_attach_cni_policy    = optional(bool)<br/>    iam_role_additional_policies  = optional(map(string))<br/>    create_iam_role_policy        = optional(bool)<br/>    iam_role_policy_statements = optional(list(object({<br/>      sid           = optional(string)<br/>      actions       = optional(list(string))<br/>      not_actions   = optional(list(string))<br/>      effect        = optional(string)<br/>      resources     = optional(list(string))<br/>      not_resources = optional(list(string))<br/>      principals = optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      not_principals = optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      condition = optional(list(object({<br/>        test     = string<br/>        values   = list(string)<br/>        variable = string<br/>      })))<br/>    })))<br/>    # Security group<br/>    vpc_security_group_ids                = optional(list(string), [])<br/>    attach_cluster_primary_security_group = optional(bool, false)<br/>    cluster_primary_security_group_id     = optional(string)<br/>    create_security_group                 = optional(bool)<br/>    
security_group_name                   = optional(string)<br/>    security_group_use_name_prefix        = optional(bool)<br/>    security_group_description            = optional(string)<br/>    security_group_ingress_rules = optional(map(object({<br/>      name                         = optional(string)<br/>      cidr_ipv4                    = optional(string)<br/>      cidr_ipv6                    = optional(string)<br/>      description                  = optional(string)<br/>      from_port                    = optional(string)<br/>      ip_protocol                  = optional(string)<br/>      prefix_list_id               = optional(string)<br/>      referenced_security_group_id = optional(string)<br/>      self                         = optional(bool)<br/>      tags                         = optional(map(string))<br/>      to_port                      = optional(string)<br/>    })))<br/>    security_group_egress_rules = optional(map(object({<br/>      name                         = optional(string)<br/>      cidr_ipv4                    = optional(string)<br/>      cidr_ipv6                    = optional(string)<br/>      description                  = optional(string)<br/>      from_port                    = optional(string)<br/>      ip_protocol                  = optional(string)<br/>      prefix_list_id               = optional(string)<br/>      referenced_security_group_id = optional(string)<br/>      self                         = optional(bool)<br/>      tags                         = optional(map(string))<br/>      to_port                      = optional(string)<br/>    })), {})<br/>    security_group_tags = optional(map(string))<br/><br/>    tags = optional(map(string))<br/>  }))</pre> | `null` | no |
+| <a name="input_enable_auto_mode_custom_tags"></a> [enable\_auto\_mode\_custom\_tags](#input\_enable\_auto\_mode\_custom\_tags) | Determines whether to enable permissions for custom tags resources created by EKS Auto Mode | `bool` | `true` | no |
+| <a name="input_enable_cluster_creator_admin_permissions"></a> [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry | `bool` | `false` | no |
+| <a name="input_enable_irsa"></a> [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no |
+| <a name="input_enable_kms_key_rotation"></a> [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled | `bool` | `true` | no |
+| <a name="input_enabled_log_types"></a> [enabled\_log\_types](#input\_enabled\_log\_types) | A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | `list(string)` | <pre>[<br/>  "audit",<br/>  "api",<br/>  "authenticator"<br/>]</pre> | no |
+| <a name="input_encryption_config"></a> [encryption\_config](#input\_encryption\_config) | Configuration block with encryption configuration for the cluster | <pre>object({<br/>    provider_key_arn = optional(string)<br/>    resources        = optional(list(string), ["secrets"])<br/>  })</pre> | `{}` | no |
+| <a name="input_encryption_policy_description"></a> [encryption\_policy\_description](#input\_encryption\_policy\_description) | Description of the cluster encryption policy created | `string` | `"Cluster encryption policy to allow cluster role to utilize CMK provided"` | no |
+| <a name="input_encryption_policy_name"></a> [encryption\_policy\_name](#input\_encryption\_policy\_name) | Name to use on cluster encryption policy created | `string` | `null` | no |
+| <a name="input_encryption_policy_path"></a> [encryption\_policy\_path](#input\_encryption\_policy\_path) | Cluster encryption policy path | `string` | `null` | no |
+| <a name="input_encryption_policy_tags"></a> [encryption\_policy\_tags](#input\_encryption\_policy\_tags) | A map of additional tags to add to the cluster encryption policy created | `map(string)` | `{}` | no |
+| <a name="input_encryption_policy_use_name_prefix"></a> [encryption\_policy\_use\_name\_prefix](#input\_encryption\_policy\_use\_name\_prefix) | Determines whether cluster encryption policy name (`cluster_encryption_policy_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_endpoint_private_access"></a> [endpoint\_private\_access](#input\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no |
+| <a name="input_endpoint_public_access"></a> [endpoint\_public\_access](#input\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `false` | no |
+| <a name="input_endpoint_public_access_cidrs"></a> [endpoint\_public\_access\_cidrs](#input\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | <pre>[<br/>  "0.0.0.0/0"<br/>]</pre> | no |
+| <a name="input_fargate_profiles"></a> [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | <pre>map(object({<br/>    create = optional(bool)<br/><br/>    # Fargate profile<br/>    name       = optional(string) # Will fall back to map key<br/>    subnet_ids = optional(list(string))<br/>    selectors = optional(list(object({<br/>      labels    = optional(map(string))<br/>      namespace = string<br/>    })))<br/>    timeouts = optional(object({<br/>      create = optional(string)<br/>      delete = optional(string)<br/>    }))<br/><br/>    # IAM role<br/>    create_iam_role               = optional(bool)<br/>    iam_role_arn                  = optional(string)<br/>    iam_role_name                 = optional(string)<br/>    iam_role_use_name_prefix      = optional(bool)<br/>    iam_role_path                 = optional(string)<br/>    iam_role_description          = optional(string)<br/>    iam_role_permissions_boundary = optional(string)<br/>    iam_role_tags                 = optional(map(string))<br/>    iam_role_attach_cni_policy    = optional(bool)<br/>    iam_role_additional_policies  = optional(map(string))<br/>    create_iam_role_policy        = optional(bool)<br/>    iam_role_policy_statements = optional(list(object({<br/>      sid           = optional(string)<br/>      actions       = optional(list(string))<br/>      not_actions   = optional(list(string))<br/>      effect        = optional(string)<br/>      resources     = optional(list(string))<br/>      not_resources = optional(list(string))<br/>      principals = optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      not_principals = optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      condition = optional(list(object({<br/>        test     = string<br/>        values   = list(string)<br/>        variable = string<br/>      })))<br/>    })))<br/>    tags = optional(map(string))<br/>  }))</pre> | `null` | no |
+| <a name="input_force_update_version"></a> [force\_update\_version](#input\_force\_update\_version) | Force version update by overriding upgrade-blocking readiness checks when updating a cluster | `bool` | `null` | no |
+| <a name="input_iam_role_additional_policies"></a> [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
+| <a name="input_iam_role_arn"></a> [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| <a name="input_iam_role_description"></a> [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| <a name="input_iam_role_name"></a> [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| <a name="input_iam_role_path"></a> [iam\_role\_path](#input\_iam\_role\_path) | The IAM role path | `string` | `null` | no |
+| <a name="input_iam_role_permissions_boundary"></a> [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_identity_providers"></a> [identity\_providers](#input\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | <pre>map(object({<br/>    client_id                     = string<br/>    groups_claim                  = optional(string)<br/>    groups_prefix                 = optional(string)<br/>    identity_provider_config_name = optional(string) # will fall back to map key<br/>    issuer_url                    = string<br/>    required_claims               = optional(map(string))<br/>    username_claim                = optional(string)<br/>    username_prefix               = optional(string)<br/>    tags                          = optional(map(string), {})<br/>  }))</pre> | `null` | no |
+| <a name="input_include_oidc_root_ca_thumbprint"></a> [include\_oidc\_root\_ca\_thumbprint](#input\_include\_oidc\_root\_ca\_thumbprint) | Determines whether to include the root CA thumbprint in the OpenID Connect (OIDC) identity provider's server certificate(s) | `bool` | `true` | no |
+| <a name="input_ip_family"></a> [ip\_family](#input\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `"ipv4"` | no |
+| <a name="input_kms_key_administrators"></a> [kms\_key\_administrators](#input\_kms\_key\_administrators) | A list of IAM ARNs for [key administrators](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-administrators). If no value is provided, the current caller identity is used to ensure at least one key admin is available | `list(string)` | `[]` | no |
+| <a name="input_kms_key_aliases"></a> [kms\_key\_aliases](#input\_kms\_key\_aliases) | A list of aliases to create. Note - due to the use of `toset()`, values must be static strings and not computed values | `list(string)` | `[]` | no |
+| <a name="input_kms_key_deletion_window_in_days"></a> [kms\_key\_deletion\_window\_in\_days](#input\_kms\_key\_deletion\_window\_in\_days) | The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key. If you specify a value, it must be between `7` and `30`, inclusive. If you do not specify a value, it defaults to `30` | `number` | `null` | no |
+| <a name="input_kms_key_description"></a> [kms\_key\_description](#input\_kms\_key\_description) | The description of the key as viewed in AWS console | `string` | `null` | no |
+| <a name="input_kms_key_enable_default_policy"></a> [kms\_key\_enable\_default\_policy](#input\_kms\_key\_enable\_default\_policy) | Specifies whether to enable the default key policy | `bool` | `true` | no |
+| <a name="input_kms_key_override_policy_documents"></a> [kms\_key\_override\_policy\_documents](#input\_kms\_key\_override\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` | `list(string)` | `[]` | no |
+| <a name="input_kms_key_owners"></a> [kms\_key\_owners](#input\_kms\_key\_owners) | A list of IAM ARNs for those who will have full key permissions (`kms:*`) | `list(string)` | `[]` | no |
+| <a name="input_kms_key_service_users"></a> [kms\_key\_service\_users](#input\_kms\_key\_service\_users) | A list of IAM ARNs for [key service users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-service-integration) | `list(string)` | `[]` | no |
+| <a name="input_kms_key_source_policy_documents"></a> [kms\_key\_source\_policy\_documents](#input\_kms\_key\_source\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. Statements must have unique `sid`s | `list(string)` | `[]` | no |
+| <a name="input_kms_key_users"></a> [kms\_key\_users](#input\_kms\_key\_users) | A list of IAM ARNs for [key users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-users) | `list(string)` | `[]` | no |
+| <a name="input_kubernetes_version"></a> [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.33`) | `string` | `null` | no |
+| <a name="input_name"></a> [name](#input\_name) | Name of the EKS cluster | `string` | `""` | no |
+| <a name="input_node_iam_role_additional_policies"></a> [node\_iam\_role\_additional\_policies](#input\_node\_iam\_role\_additional\_policies) | Additional policies to be added to the EKS Auto node IAM role | `map(string)` | `{}` | no |
+| <a name="input_node_iam_role_description"></a> [node\_iam\_role\_description](#input\_node\_iam\_role\_description) | Description of the EKS Auto node IAM role | `string` | `null` | no |
+| <a name="input_node_iam_role_name"></a> [node\_iam\_role\_name](#input\_node\_iam\_role\_name) | Name to use on the EKS Auto node IAM role created | `string` | `null` | no |
+| <a name="input_node_iam_role_path"></a> [node\_iam\_role\_path](#input\_node\_iam\_role\_path) | The EKS Auto node IAM role path | `string` | `null` | no |
+| <a name="input_node_iam_role_permissions_boundary"></a> [node\_iam\_role\_permissions\_boundary](#input\_node\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the EKS Auto node IAM role | `string` | `null` | no |
+| <a name="input_node_iam_role_tags"></a> [node\_iam\_role\_tags](#input\_node\_iam\_role\_tags) | A map of additional tags to add to the EKS Auto node IAM role created | `map(string)` | `{}` | no |
+| <a name="input_node_iam_role_use_name_prefix"></a> [node\_iam\_role\_use\_name\_prefix](#input\_node\_iam\_role\_use\_name\_prefix) | Determines whether the EKS Auto node IAM role name (`node_iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_node_security_group_additional_rules"></a> [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | <pre>map(object({<br/>    protocol                      = optional(string, "tcp")<br/>    from_port                     = number<br/>    to_port                       = number<br/>    type                          = optional(string, "ingress")<br/>    description                   = optional(string)<br/>    cidr_blocks                   = optional(list(string))<br/>    ipv6_cidr_blocks              = optional(list(string))<br/>    prefix_list_ids               = optional(list(string))<br/>    self                          = optional(bool)<br/>    source_cluster_security_group = optional(bool, false)<br/>    source_security_group_id      = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_node_security_group_description"></a> [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
+| <a name="input_node_security_group_enable_recommended_rules"></a> [node\_security\_group\_enable\_recommended\_rules](#input\_node\_security\_group\_enable\_recommended\_rules) | Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic | `bool` | `true` | no |
+| <a name="input_node_security_group_id"></a> [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
+| <a name="input_node_security_group_name"></a> [node\_security\_group\_name](#input\_node\_security\_group\_name) | Name to use on node security group created | `string` | `null` | no |
+| <a name="input_node_security_group_tags"></a> [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no |
+| <a name="input_node_security_group_use_name_prefix"></a> [node\_security\_group\_use\_name\_prefix](#input\_node\_security\_group\_use\_name\_prefix) | Determines whether node security group name (`node_security_group_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_openid_connect_audiences"></a> [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
+| <a name="input_outpost_config"></a> [outpost\_config](#input\_outpost\_config) | Configuration for the AWS Outpost to provision the cluster on | <pre>object({<br/>    control_plane_instance_type = optional(string)<br/>    control_plane_placement = optional(object({<br/>      group_name = string<br/>    }))<br/>    outpost_arns = list(string)<br/>  })</pre> | `null` | no |
+| <a name="input_prefix_separator"></a> [prefix\_separator](#input\_prefix\_separator) | The separator to use between the prefix and the generated timestamp for resource names | `string` | `"-"` | no |
+| <a name="input_putin_khuylo"></a> [putin\_khuylo](#input\_putin\_khuylo) | Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo! | `bool` | `true` | no |
+| <a name="input_region"></a> [region](#input\_region) | Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration | `string` | `null` | no |
+| <a name="input_remote_network_config"></a> [remote\_network\_config](#input\_remote\_network\_config) | Configuration block for the cluster remote network configuration | <pre>object({<br/>    remote_node_networks = object({<br/>      cidrs = optional(list(string))<br/>    })<br/>    remote_pod_networks = optional(object({<br/>      cidrs = optional(list(string))<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_security_group_additional_rules"></a> [security\_group\_additional\_rules](#input\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | <pre>map(object({<br/>    protocol                   = optional(string, "tcp")<br/>    from_port                  = number<br/>    to_port                    = number<br/>    type                       = optional(string, "ingress")<br/>    description                = optional(string)<br/>    cidr_blocks                = optional(list(string))<br/>    ipv6_cidr_blocks           = optional(list(string))<br/>    prefix_list_ids            = optional(list(string))<br/>    self                       = optional(bool)<br/>    source_node_security_group = optional(bool, false)<br/>    source_security_group_id   = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_security_group_description"></a> [security\_group\_description](#input\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no |
+| <a name="input_security_group_id"></a> [security\_group\_id](#input\_security\_group\_id) | Existing security group ID to be attached to the cluster | `string` | `""` | no |
+| <a name="input_security_group_name"></a> [security\_group\_name](#input\_security\_group\_name) | Name to use on cluster security group created | `string` | `null` | no |
+| <a name="input_security_group_tags"></a> [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
+| <a name="input_security_group_use_name_prefix"></a> [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_self_managed_node_groups"></a> [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Map of self-managed node group definitions to create | <pre>map(object({<br/>    create             = optional(bool)<br/>    kubernetes_version = optional(string)<br/><br/>    # Autoscaling Group<br/>    create_autoscaling_group         = optional(bool)<br/>    name                             = optional(string) # Will fall back to map key<br/>    use_name_prefix                  = optional(bool)<br/>    availability_zones               = optional(list(string))<br/>    subnet_ids                       = optional(list(string))<br/>    min_size                         = optional(number)<br/>    max_size                         = optional(number)<br/>    desired_size                     = optional(number)<br/>    desired_size_type                = optional(string)<br/>    capacity_rebalance               = optional(bool)<br/>    default_instance_warmup          = optional(number)<br/>    protect_from_scale_in            = optional(bool)<br/>    context                          = optional(string)<br/>    create_placement_group           = optional(bool)<br/>    placement_group                  = optional(string)<br/>    health_check_type                = optional(string)<br/>    health_check_grace_period        = optional(number)<br/>    ignore_failed_scaling_activities = optional(bool)<br/>    force_delete                     = optional(bool)<br/>    termination_policies             = optional(list(string))<br/>    suspended_processes              = optional(list(string))<br/>    max_instance_lifetime            = optional(number)<br/>    enabled_metrics                  = optional(list(string))<br/>    metrics_granularity              = optional(string)<br/>    initial_lifecycle_hooks = optional(list(object({<br/>      default_result          = optional(string)<br/>      heartbeat_timeout       = optional(number)<br/>      lifecycle_transition    = string<br/>      name                    = string<br/>      notification_metadata   = optional(string)<br/>      notification_target_arn = optional(string)<br/>      role_arn                = optional(string)<br/>    })))<br/>    instance_maintenance_policy = optional(object({<br/>      max_healthy_percentage = number<br/>      min_healthy_percentage = number<br/>    }))<br/>    instance_refresh = optional(object({<br/>      preferences = optional(object({<br/>        alarm_specification = optional(object({<br/>          alarms = optional(list(string))<br/>        }))<br/>        auto_rollback                = optional(bool)<br/>        checkpoint_delay             = optional(number)<br/>        checkpoint_percentages       = optional(list(number))<br/>        instance_warmup              = optional(number)<br/>        max_healthy_percentage       = optional(number)<br/>        min_healthy_percentage       = optional(number)<br/>        scale_in_protected_instances = optional(string)<br/>        skip_matching                = optional(bool)<br/>        standby_instances            = optional(string)<br/>      }))<br/>      strategy = optional(string)<br/>      triggers = optional(list(string))<br/>    }))<br/>    use_mixed_instances_policy = optional(bool)<br/>    mixed_instances_policy = optional(object({<br/>      instances_distribution = optional(object({<br/>        on_demand_allocation_strategy            = optional(string)<br/>        on_demand_base_capacity                  = optional(number)<br/>        
on_demand_percentage_above_base_capacity = optional(number)<br/>        spot_allocation_strategy                 = optional(string)<br/>        spot_instance_pools                      = optional(number)<br/>        spot_max_price                           = optional(string)<br/>      }))<br/>      launch_template = object({<br/>        override = optional(list(object({<br/>          instance_requirements = optional(object({<br/>            accelerator_count = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            accelerator_manufacturers = optional(list(string))<br/>            accelerator_names         = optional(list(string))<br/>            accelerator_total_memory_mib = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            accelerator_types      = optional(list(string))<br/>            allowed_instance_types = optional(list(string))<br/>            bare_metal             = optional(string)<br/>            baseline_ebs_bandwidth_mbps = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            burstable_performance                                   = optional(string)<br/>            cpu_manufacturers                                       = optional(list(string))<br/>            excluded_instance_types                                 = optional(list(string))<br/>            instance_generations                                    = optional(list(string))<br/>            local_storage                                           = optional(string)<br/>            local_storage_types                                     = optional(list(string))<br/>            max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)<br/>            memory_gib_per_vcpu = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            memory_mib = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            network_bandwidth_gbps = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            network_interface_count = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            on_demand_max_price_percentage_over_lowest_price = optional(number)<br/>            require_hibernate_support                        = optional(bool)<br/>            spot_max_price_percentage_over_lowest_price      = optional(number)<br/>            total_local_storage_gb = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>            vcpu_count = optional(object({<br/>              max = optional(number)<br/>              min = optional(number)<br/>            }))<br/>          }))<br/>          instance_type = optional(string)<br/>          launch_template_specification = optional(object({<br/>            launch_template_id   = optional(string)<br/>            launch_template_name = optional(string)<br/>            version              = optional(string)<br/>          }))<br/>          weighted_capacity = optional(string)<br/>        })))<br/>      })<br/>    }))<br/>    timeouts = optional(object({<br/>      delete = 
optional(string)<br/>    }))<br/>    autoscaling_group_tags = optional(map(string))<br/>    # User data<br/>    ami_type                   = optional(string)<br/>    additional_cluster_dns_ips = optional(list(string))<br/>    pre_bootstrap_user_data    = optional(string)<br/>    post_bootstrap_user_data   = optional(string)<br/>    bootstrap_extra_args       = optional(string)<br/>    user_data_template_path    = optional(string)<br/>    cloudinit_pre_nodeadm = optional(list(object({<br/>      content      = string<br/>      content_type = optional(string)<br/>      filename     = optional(string)<br/>      merge_type   = optional(string)<br/>    })))<br/>    cloudinit_post_nodeadm = optional(list(object({<br/>      content      = string<br/>      content_type = optional(string)<br/>      filename     = optional(string)<br/>      merge_type   = optional(string)<br/>    })))<br/>    # Launch Template<br/>    create_launch_template                 = optional(bool)<br/>    use_custom_launch_template             = optional(bool)<br/>    launch_template_id                     = optional(string)<br/>    launch_template_name                   = optional(string) # Will fall back to map key<br/>    launch_template_use_name_prefix        = optional(bool)<br/>    launch_template_version                = optional(string)<br/>    launch_template_default_version        = optional(string)<br/>    update_launch_template_default_version = optional(bool)<br/>    launch_template_description            = optional(string)<br/>    launch_template_tags                   = optional(map(string))<br/>    tag_specifications                     = optional(list(string))<br/>    ebs_optimized                          = optional(bool)<br/>    ami_id                                 = optional(string)<br/>    instance_type                          = optional(string)<br/>    key_name                               = optional(string)<br/>    disable_api_termination                = optional(bool)<br/>    instance_initiated_shutdown_behavior   = optional(string)<br/>    kernel_id                              = optional(string)<br/>    ram_disk_id                            = optional(string)<br/>    block_device_mappings = optional(map(object({<br/>      device_name = optional(string)<br/>      ebs = optional(object({<br/>        delete_on_termination      = optional(bool)<br/>        encrypted                  = optional(bool)<br/>        iops                       = optional(number)<br/>        kms_key_id                 = optional(string)<br/>        snapshot_id                = optional(string)<br/>        throughput                 = optional(number)<br/>        volume_initialization_rate = optional(number)<br/>        volume_size                = optional(number)<br/>        volume_type                = optional(string)<br/>      }))<br/>      no_device    = optional(string)<br/>      virtual_name = optional(string)<br/>    })))<br/>    capacity_reservation_specification = optional(object({<br/>      capacity_reservation_preference = optional(string)<br/>      capacity_reservation_target = optional(object({<br/>        capacity_reservation_id                 = optional(string)<br/>        capacity_reservation_resource_group_arn = optional(string)<br/>      }))<br/>    }))<br/>    cpu_options = optional(object({<br/>      amd_sev_snp      = optional(string)<br/>      core_count       = optional(number)<br/>      threads_per_core = optional(number)<br/>    }))<br/>    credit_specification = optional(object({<br/>      
cpu_credits = optional(string)<br/>    }))<br/>    enclave_options = optional(object({<br/>      enabled = optional(bool)<br/>    }))<br/>    instance_requirements = optional(object({<br/>      accelerator_count = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      accelerator_manufacturers = optional(list(string))<br/>      accelerator_names         = optional(list(string))<br/>      accelerator_total_memory_mib = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      accelerator_types      = optional(list(string))<br/>      allowed_instance_types = optional(list(string))<br/>      bare_metal             = optional(string)<br/>      baseline_ebs_bandwidth_mbps = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      burstable_performance                                   = optional(string)<br/>      cpu_manufacturers                                       = optional(list(string))<br/>      excluded_instance_types                                 = optional(list(string))<br/>      instance_generations                                    = optional(list(string))<br/>      local_storage                                           = optional(string)<br/>      local_storage_types                                     = optional(list(string))<br/>      max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)<br/>      memory_gib_per_vcpu = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      memory_mib = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      network_bandwidth_gbps = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      network_interface_count = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      on_demand_max_price_percentage_over_lowest_price = optional(number)<br/>      require_hibernate_support                        = optional(bool)<br/>      spot_max_price_percentage_over_lowest_price      = optional(number)<br/>      total_local_storage_gb = optional(object({<br/>        max = optional(number)<br/>        min = optional(number)<br/>      }))<br/>      vcpu_count = optional(object({<br/>        max = optional(number)<br/>        min = string<br/>      }))<br/>    }))<br/>    instance_market_options = optional(object({<br/>      market_type = optional(string)<br/>      spot_options = optional(object({<br/>        block_duration_minutes         = optional(number)<br/>        instance_interruption_behavior = optional(string)<br/>        max_price                      = optional(string)<br/>        spot_instance_type             = optional(string)<br/>        valid_until                    = optional(string)<br/>      }))<br/>    }))<br/>    license_specifications = optional(list(object({<br/>      license_configuration_arn = string<br/>    })))<br/>    metadata_options = optional(object({<br/>      http_endpoint               = optional(string)<br/>      http_protocol_ipv6          = optional(string)<br/>      http_put_response_hop_limit = optional(number)<br/>      http_tokens                 = optional(string)<br/>      instance_metadata_tags      = optional(string)<br/>    }))<br/>    enable_monitoring  = optional(bool)<br/>    enable_efa_support = 
optional(bool)<br/>    enable_efa_only    = optional(bool)<br/>    efa_indices        = optional(list(string))<br/>    network_interfaces = optional(list(object({<br/>      associate_carrier_ip_address = optional(bool)<br/>      associate_public_ip_address  = optional(bool)<br/>      connection_tracking_specification = optional(object({<br/>        tcp_established_timeout = optional(number)<br/>        udp_stream_timeout      = optional(number)<br/>        udp_timeout             = optional(number)<br/>      }))<br/>      delete_on_termination = optional(bool)<br/>      description           = optional(string)<br/>      device_index          = optional(number)<br/>      ena_srd_specification = optional(object({<br/>        ena_srd_enabled = optional(bool)<br/>        ena_srd_udp_specification = optional(object({<br/>          ena_srd_udp_enabled = optional(bool)<br/>        }))<br/>      }))<br/>      interface_type       = optional(string)<br/>      ipv4_address_count   = optional(number)<br/>      ipv4_addresses       = optional(list(string))<br/>      ipv4_prefix_count    = optional(number)<br/>      ipv4_prefixes        = optional(list(string))<br/>      ipv6_address_count   = optional(number)<br/>      ipv6_addresses       = optional(list(string))<br/>      ipv6_prefix_count    = optional(number)<br/>      ipv6_prefixes        = optional(list(string))<br/>      network_card_index   = optional(number)<br/>      network_interface_id = optional(string)<br/>      primary_ipv6         = optional(bool)<br/>      private_ip_address   = optional(string)<br/>      security_groups      = optional(list(string))<br/>      subnet_id            = optional(string)<br/>    })))<br/>    placement = optional(object({<br/>      affinity                = optional(string)<br/>      availability_zone       = optional(string)<br/>      group_name              = optional(string)<br/>      host_id                 = optional(string)<br/>      host_resource_group_arn = optional(string)<br/>      partition_number        = optional(number)<br/>      spread_domain           = optional(string)<br/>      tenancy                 = optional(string)<br/>    }))<br/>    maintenance_options = optional(object({<br/>      auto_recovery = optional(string)<br/>    }))<br/>    private_dns_name_options = optional(object({<br/>      enable_resource_name_dns_aaaa_record = optional(bool)<br/>      enable_resource_name_dns_a_record    = optional(bool)<br/>      hostname_type                        = optional(string)<br/>    }))<br/>    # IAM role<br/>    create_iam_instance_profile   = optional(bool)<br/>    iam_instance_profile_arn      = optional(string)<br/>    iam_role_name                 = optional(string)<br/>    iam_role_use_name_prefix      = optional(bool)<br/>    iam_role_path                 = optional(string)<br/>    iam_role_description          = optional(string)<br/>    iam_role_permissions_boundary = optional(string)<br/>    iam_role_tags                 = optional(map(string))<br/>    iam_role_attach_cni_policy    = optional(bool)<br/>    iam_role_additional_policies  = optional(map(string))<br/>    create_iam_role_policy        = optional(bool)<br/>    iam_role_policy_statements = optional(list(object({<br/>      sid           = optional(string)<br/>      actions       = optional(list(string))<br/>      not_actions   = optional(list(string))<br/>      effect        = optional(string)<br/>      resources     = optional(list(string))<br/>      not_resources = optional(list(string))<br/>      principals = 
optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      not_principals = optional(list(object({<br/>        type        = string<br/>        identifiers = list(string)<br/>      })))<br/>      condition = optional(list(object({<br/>        test     = string<br/>        values   = list(string)<br/>        variable = string<br/>      })))<br/>    })))<br/>    # Access entry<br/>    create_access_entry = optional(bool)<br/>    iam_role_arn        = optional(string)<br/>    # Security group<br/>    vpc_security_group_ids                = optional(list(string), [])<br/>    attach_cluster_primary_security_group = optional(bool, false)<br/>    create_security_group                 = optional(bool)<br/>    security_group_name                   = optional(string)<br/>    security_group_use_name_prefix        = optional(bool)<br/>    security_group_description            = optional(string)<br/>    security_group_ingress_rules = optional(map(object({<br/>      name                         = optional(string)<br/>      cidr_ipv4                    = optional(string)<br/>      cidr_ipv6                    = optional(string)<br/>      description                  = optional(string)<br/>      from_port                    = optional(string)<br/>      ip_protocol                  = optional(string)<br/>      prefix_list_id               = optional(string)<br/>      referenced_security_group_id = optional(string)<br/>      self                         = optional(bool)<br/>      tags                         = optional(map(string))<br/>      to_port                      = optional(string)<br/>    })))<br/>    security_group_egress_rules = optional(map(object({<br/>      name                         = optional(string)<br/>      cidr_ipv4                    = optional(string)<br/>      cidr_ipv6                    = optional(string)<br/>      description                  = optional(string)<br/>      from_port                    = optional(string)<br/>      ip_protocol                  = optional(string)<br/>      prefix_list_id               = optional(string)<br/>      referenced_security_group_id = optional(string)<br/>      self                         = optional(bool)<br/>      tags                         = optional(map(string))<br/>      to_port                      = optional(string)<br/>    })))<br/>    security_group_tags = optional(map(string))<br/><br/>    tags = optional(map(string))<br/>  }))</pre> | `null` | no |
+| <a name="input_service_ipv4_cidr"></a> [service\_ipv4\_cidr](#input\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
+| <a name="input_service_ipv6_cidr"></a> [service\_ipv6\_cidr](#input\_service\_ipv6\_cidr) | The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster | `string` | `null` | no |
+| <a name="input_subnet_ids"></a> [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs where the nodes/node groups will be provisioned. If `control_plane_subnet_ids` is not provided, the EKS cluster control plane (ENIs) will be provisioned in these subnets | `list(string)` | `[]` | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| <a name="input_timeouts"></a> [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the cluster | <pre>object({<br/>    create = optional(string)<br/>    update = optional(string)<br/>    delete = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_upgrade_policy"></a> [upgrade\_policy](#input\_upgrade\_policy) | Configuration block for the cluster upgrade policy | <pre>object({<br/>    support_type = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_vpc_id"></a> [vpc\_id](#input\_vpc\_id) | ID of the VPC where the cluster security group will be provisioned | `string` | `null` | no |
+| <a name="input_zonal_shift_config"></a> [zonal\_shift\_config](#input\_zonal\_shift\_config) | Configuration block for the cluster zonal shift | <pre>object({<br/>    enabled = optional(bool)<br/>  })</pre> | `null` | no |
 
 ## Outputs
 
 | Name | Description |
 |------|-------------|
+| <a name="output_access_entries"></a> [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes |
+| <a name="output_access_policy_associations"></a> [access\_policy\_associations](#output\_access\_policy\_associations) | Map of eks cluster access policy associations created and their attributes |
 | <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
 | <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
-| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster. |
-| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
-| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint for your EKS Kubernetes API. |
-| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster. |
-| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster. |
-| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer |
-| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console. |
-| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console. |
-| <a name="output_cluster_version"></a> [cluster\_version](#output\_cluster\_version) | The Kubernetes server version for the EKS cluster. |
-| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
-| <a name="output_fargate_iam_role_arn"></a> [fargate\_iam\_role\_arn](#output\_fargate\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| <a name="output_fargate_iam_role_name"></a> [fargate\_iam\_role\_name](#output\_fargate\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-| <a name="output_fargate_profile_arns"></a> [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| <a name="output_fargate_profile_ids"></a> [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| <a name="output_kubeconfig"></a> [kubeconfig](#output\_kubeconfig) | kubectl config file contents for this EKS cluster. Will block on cluster creation until the cluster is really ready. |
-| <a name="output_kubeconfig_filename"></a> [kubeconfig\_filename](#output\_kubeconfig\_filename) | The filename of the generated kubectl config. Will block on cluster creation until the cluster is really ready. |
-| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by var.node\_groups keys |
-| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
-| <a name="output_security_group_rule_cluster_https_worker_ingress"></a> [security\_group\_rule\_cluster\_https\_worker\_ingress](#output\_security\_group\_rule\_cluster\_https\_worker\_ingress) | Security group rule responsible for allowing pods to communicate with the EKS cluster API. |
-| <a name="output_worker_iam_instance_profile_arns"></a> [worker\_iam\_instance\_profile\_arns](#output\_worker\_iam\_instance\_profile\_arns) | default IAM instance profile ARN for EKS worker groups |
-| <a name="output_worker_iam_instance_profile_names"></a> [worker\_iam\_instance\_profile\_names](#output\_worker\_iam\_instance\_profile\_names) | default IAM instance profile name for EKS worker groups |
-| <a name="output_worker_iam_role_arn"></a> [worker\_iam\_role\_arn](#output\_worker\_iam\_role\_arn) | default IAM role ARN for EKS worker groups |
-| <a name="output_worker_iam_role_name"></a> [worker\_iam\_role\_name](#output\_worker\_iam\_role\_name) | default IAM role name for EKS worker groups |
-| <a name="output_worker_security_group_id"></a> [worker\_security\_group\_id](#output\_worker\_security\_group\_id) | Security group ID attached to the EKS workers. |
-| <a name="output_workers_asg_arns"></a> [workers\_asg\_arns](#output\_workers\_asg\_arns) | IDs of the autoscaling groups containing workers. |
-| <a name="output_workers_asg_names"></a> [workers\_asg\_names](#output\_workers\_asg\_names) | Names of the autoscaling groups containing workers. |
-| <a name="output_workers_default_ami_id"></a> [workers\_default\_ami\_id](#output\_workers\_default\_ami\_id) | ID of the default worker group AMI |
-| <a name="output_workers_default_ami_id_windows"></a> [workers\_default\_ami\_id\_windows](#output\_workers\_default\_ami\_id\_windows) | ID of the default Windows worker group AMI |
-| <a name="output_workers_launch_template_arns"></a> [workers\_launch\_template\_arns](#output\_workers\_launch\_template\_arns) | ARNs of the worker launch templates. |
-| <a name="output_workers_launch_template_ids"></a> [workers\_launch\_template\_ids](#output\_workers\_launch\_template\_ids) | IDs of the worker launch templates. |
-| <a name="output_workers_launch_template_latest_versions"></a> [workers\_launch\_template\_latest\_versions](#output\_workers\_launch\_template\_latest\_versions) | Latest versions of the worker launch templates. |
-| <a name="output_workers_user_data"></a> [workers\_user\_data](#output\_workers\_user\_data) | User data of worker groups |
-<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
+| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| <a name="output_cluster_dualstack_oidc_issuer_url"></a> [cluster\_dualstack\_oidc\_issuer\_url](#output\_cluster\_dualstack\_oidc\_issuer\_url) | Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | Cluster IAM role ARN |
+| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | Cluster IAM role name |
+| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
+| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| <a name="output_cluster_ip_family"></a> [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) |
+| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
+| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| <a name="output_cluster_service_cidr"></a> [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from |
+| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
+| <a name="output_cluster_version"></a> [cluster\_version](#output\_cluster\_version) | The Kubernetes version for the cluster |
+| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
+| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
+| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
+| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | EKS Auto node IAM role ARN |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | EKS Auto node IAM role name |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
+| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
+<!-- END_TF_DOCS -->
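+
+One common way the outputs above are consumed is to configure the Terraform `kubernetes` provider against the cluster this module creates. The sketch below assumes the module is instantiated as `module "eks"` and that the `aws` CLI is available where Terraform runs; it uses the `cluster_endpoint`, `cluster_certificate_authority_data`, and `cluster_name` outputs documented above:
+
+```hcl
+provider "kubernetes" {
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "aws"
+    # Issues a short-lived authentication token for the cluster
+    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
+  }
+}
+```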
+
+## License
+
+Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
+
+## Additional information for users from Russia and Belarus
+
+* Russia [illegally annexed Crimea in 2014](https://en.wikipedia.org/wiki/Annexation_of_Crimea_by_the_Russian_Federation) and [brought the war in Donbas](https://en.wikipedia.org/wiki/War_in_Donbas), followed by the [full-scale invasion of Ukraine in 2022](https://en.wikipedia.org/wiki/2022_Russian_invasion_of_Ukraine).
+* Russia has brought sorrow and devastation to millions of Ukrainians, killed hundreds of innocent people, damaged thousands of buildings, and forced several million people to flee.
+* [Putin khuylo!](https://en.wikipedia.org/wiki/Putin_khuylo!)
diff --git a/aws_auth.tf b/aws_auth.tf
deleted file mode 100644
index 6eb563203d..0000000000
--- a/aws_auth.tf
+++ /dev/null
@@ -1,91 +0,0 @@
-locals {
-  auth_launch_template_worker_roles = [
-    for index in range(0, var.create_eks ? local.worker_group_launch_template_count : 0) : {
-      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
-        coalescelist(
-          aws_iam_instance_profile.workers_launch_template.*.role,
-          data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
-          [""]
-        ),
-        index
-      )}"
-      platform = lookup(
-        var.worker_groups_launch_template[index],
-        "platform",
-        local.workers_group_defaults["platform"]
-      )
-    }
-  ]
-
-  auth_worker_roles = [
-    for index in range(0, var.create_eks ? local.worker_group_count : 0) : {
-      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
-        coalescelist(
-          aws_iam_instance_profile.workers.*.role,
-          data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
-          [""]
-        ),
-        index,
-      )}"
-      platform = lookup(
-        var.worker_groups[index],
-        "platform",
-        local.workers_group_defaults["platform"]
-      )
-    }
-  ]
-
-  # Convert to format needed by aws-auth ConfigMap
-  configmap_roles = [
-    for role in concat(
-      local.auth_launch_template_worker_roles,
-      local.auth_worker_roles,
-      module.node_groups.aws_auth_roles,
-      module.fargate.aws_auth_roles,
-    ) :
-    {
-      # Work around https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
-      # Strip the leading slash off so that Terraform doesn't think it's a regex
-      rolearn  = replace(role["worker_role_arn"], replace(var.iam_path, "/^//", ""), "")
-      username = role["platform"] == "fargate" ? "system:node:{{SessionName}}" : "system:node:{{EC2PrivateDNSName}}"
-      groups = tolist(concat(
-        [
-          "system:bootstrappers",
-          "system:nodes",
-        ],
-        role["platform"] == "windows" ? ["eks:kube-proxy-windows"] : [],
-        role["platform"] == "fargate" ? ["system:node-proxier"] : [],
-      ))
-    }
-  ]
-}
-
-resource "kubernetes_config_map" "aws_auth" {
-  count      = var.create_eks && var.manage_aws_auth ? 1 : 0
-  depends_on = [data.http.wait_for_cluster[0]]
-
-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
-    labels = merge(
-      {
-        "app.kubernetes.io/managed-by" = "Terraform"
-        # / are replaced by . because label validator fails in this lib
-        # https://github.com/kubernetes/apimachinery/blob/1bdd76d09076d4dc0362456e59c8f551f5f24a72/pkg/util/validation/validation.go#L166
-        "terraform.io/module" = "terraform-aws-modules.eks.aws"
-      },
-      var.aws_auth_additional_labels
-    )
-  }
-
-  data = {
-    mapRoles = yamlencode(
-      distinct(concat(
-        local.configmap_roles,
-        var.map_roles,
-      ))
-    )
-    mapUsers    = yamlencode(var.map_users)
-    mapAccounts = yamlencode(var.map_accounts)
-  }
-}
diff --git a/cluster.tf b/cluster.tf
deleted file mode 100644
index 13d38a09a7..0000000000
--- a/cluster.tf
+++ /dev/null
@@ -1,175 +0,0 @@
-resource "aws_cloudwatch_log_group" "this" {
-  count             = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
-  name              = "/aws/eks/${var.cluster_name}/cluster"
-  retention_in_days = var.cluster_log_retention_in_days
-  kms_key_id        = var.cluster_log_kms_key_id
-  tags              = var.tags
-}
-
-resource "aws_eks_cluster" "this" {
-  count                     = var.create_eks ? 1 : 0
-  name                      = var.cluster_name
-  enabled_cluster_log_types = var.cluster_enabled_log_types
-  role_arn                  = local.cluster_iam_role_arn
-  version                   = var.cluster_version
-  tags                      = var.tags
-
-  vpc_config {
-    security_group_ids      = compact([local.cluster_security_group_id])
-    subnet_ids              = var.subnets
-    endpoint_private_access = var.cluster_endpoint_private_access
-    endpoint_public_access  = var.cluster_endpoint_public_access
-    public_access_cidrs     = var.cluster_endpoint_public_access_cidrs
-  }
-
-  kubernetes_network_config {
-    service_ipv4_cidr = var.cluster_service_ipv4_cidr
-  }
-
-  timeouts {
-    create = var.cluster_create_timeout
-    delete = var.cluster_delete_timeout
-  }
-
-  dynamic "encryption_config" {
-    for_each = toset(var.cluster_encryption_config)
-
-    content {
-      provider {
-        key_arn = encryption_config.value["provider_key_arn"]
-      }
-      resources = encryption_config.value["resources"]
-    }
-  }
-
-  depends_on = [
-    aws_security_group_rule.cluster_egress_internet,
-    aws_security_group_rule.cluster_https_worker_ingress,
-    aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
-    aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
-    aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy,
-    aws_cloudwatch_log_group.this
-  ]
-}
-
-resource "aws_security_group" "cluster" {
-  count       = var.cluster_create_security_group && var.create_eks ? 1 : 0
-  name_prefix = var.cluster_name
-  description = "EKS cluster security group."
-  vpc_id      = var.vpc_id
-  tags = merge(
-    var.tags,
-    {
-      "Name" = "${var.cluster_name}-eks_cluster_sg"
-    },
-  )
-}
-
-resource "aws_security_group_rule" "cluster_egress_internet" {
-  count             = var.cluster_create_security_group && var.create_eks ? 1 : 0
-  description       = "Allow cluster egress access to the Internet."
-  protocol          = "-1"
-  security_group_id = local.cluster_security_group_id
-  cidr_blocks       = var.cluster_egress_cidrs
-  from_port         = 0
-  to_port           = 0
-  type              = "egress"
-}
-
-resource "aws_security_group_rule" "cluster_https_worker_ingress" {
-  count                    = var.cluster_create_security_group && var.create_eks ? 1 : 0
-  description              = "Allow pods to communicate with the EKS cluster API."
-  protocol                 = "tcp"
-  security_group_id        = local.cluster_security_group_id
-  source_security_group_id = local.worker_security_group_id
-  from_port                = 443
-  to_port                  = 443
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
-  count       = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? 1 : 0
-  description = "Allow private K8S API ingress from custom CIDR source."
-  type        = "ingress"
-  from_port   = 443
-  to_port     = 443
-  protocol    = "tcp"
-  cidr_blocks = var.cluster_endpoint_private_access_cidrs
-
-  security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
-}
-
-resource "aws_security_group_rule" "cluster_private_access_sg_source" {
-  count                    = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
-  description              = "Allow private K8S API ingress from custom Security Groups source."
-  type                     = "ingress"
-  from_port                = 443
-  to_port                  = 443
-  protocol                 = "tcp"
-  source_security_group_id = var.cluster_endpoint_private_access_sg[count.index]
-
-  security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
-}
-
-resource "aws_iam_role" "cluster" {
-  count                 = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  name_prefix           = var.cluster_iam_role_name != "" ? null : var.cluster_name
-  name                  = var.cluster_iam_role_name != "" ? var.cluster_iam_role_name : null
-  assume_role_policy    = data.aws_iam_policy_document.cluster_assume_role_policy.json
-  permissions_boundary  = var.permissions_boundary
-  path                  = var.iam_path
-  force_detach_policies = true
-  tags                  = var.tags
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
-  count      = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
-  role       = local.cluster_iam_role_name
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
-  count      = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
-  role       = local.cluster_iam_role_name
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" {
-  count      = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController"
-  role       = local.cluster_iam_role_name
-}
-
-/*
- Adding a policy to cluster IAM role that allow permissions
- required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
-*/
-
-data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
-  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-
-  statement {
-    effect = "Allow"
-    actions = [
-      "ec2:DescribeAccountAttributes",
-      "ec2:DescribeInternetGateways",
-      "ec2:DescribeAddresses"
-    ]
-    resources = ["*"]
-  }
-}
-
-resource "aws_iam_policy" "cluster_elb_sl_role_creation" {
-  count       = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  name_prefix = "${var.cluster_name}-elb-sl-role-creation"
-  description = "Permissions for EKS to create AWSServiceRoleForElasticLoadBalancing service-linked role"
-  policy      = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json
-  path        = var.iam_path
-  tags        = var.tags
-}
-
-resource "aws_iam_role_policy_attachment" "cluster_elb_sl_role_creation" {
-  count      = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = aws_iam_policy.cluster_elb_sl_role_creation[0].arn
-  role       = local.cluster_iam_role_name
-}
diff --git a/data.tf b/data.tf
deleted file mode 100644
index bc80e74a69..0000000000
--- a/data.tf
+++ /dev/null
@@ -1,100 +0,0 @@
-data "aws_partition" "current" {}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_iam_policy_document" "workers_assume_role_policy" {
-  statement {
-    sid = "EKSWorkerAssumeRole"
-
-    actions = [
-      "sts:AssumeRole",
-    ]
-
-    principals {
-      type        = "Service"
-      identifiers = [local.ec2_principal]
-    }
-  }
-}
-
-data "aws_ami" "eks_worker" {
-  count = local.worker_has_linux_ami ? 1 : 0
-
-  filter {
-    name   = "name"
-    values = [local.worker_ami_name_filter]
-  }
-
-  most_recent = true
-
-  owners = [var.worker_ami_owner_id]
-}
-
-data "aws_ami" "eks_worker_windows" {
-  count = local.worker_has_windows_ami ? 1 : 0
-
-  filter {
-    name   = "name"
-    values = [local.worker_ami_name_filter_windows]
-  }
-
-  filter {
-    name   = "platform"
-    values = ["windows"]
-  }
-
-  most_recent = true
-
-  owners = [var.worker_ami_owner_id_windows]
-}
-
-data "aws_iam_policy_document" "cluster_assume_role_policy" {
-  statement {
-    sid = "EKSClusterAssumeRole"
-
-    actions = [
-      "sts:AssumeRole",
-    ]
-
-    principals {
-      type        = "Service"
-      identifiers = ["eks.amazonaws.com"]
-    }
-  }
-}
-
-data "aws_iam_role" "custom_cluster_iam_role" {
-  count = var.manage_cluster_iam_resources ? 0 : 1
-  name  = var.cluster_iam_role_name
-}
-
-data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
-  count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
-  name = lookup(
-    var.worker_groups[count.index],
-    "iam_instance_profile_name",
-    local.workers_group_defaults["iam_instance_profile_name"],
-  )
-}
-
-data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
-  count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
-  name = lookup(
-    var.worker_groups_launch_template[count.index],
-    "iam_instance_profile_name",
-    local.workers_group_defaults["iam_instance_profile_name"],
-  )
-}
-
-data "http" "wait_for_cluster" {
-  count          = var.create_eks && var.manage_aws_auth ? 1 : 0
-  url            = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
-  ca_certificate = base64decode(coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0])
-  timeout        = var.wait_for_cluster_timeout
-
-  depends_on = [
-    aws_eks_cluster.this,
-    aws_security_group_rule.cluster_private_access_sg_source,
-    aws_security_group_rule.cluster_private_access_cidrs_source,
-  ]
-}
diff --git a/docs/.pages b/docs/.pages
new file mode 100644
index 0000000000..ee23da4f7f
--- /dev/null
+++ b/docs/.pages
@@ -0,0 +1,3 @@
+nav:
+  - Overview: index.md
+  - Local Development: local.md
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000..960db18448
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,14 @@
+# Documentation
+
+## Table of Contents
+
+- [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md)
+- [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md)
+- [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md)
+- [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md)
+- Upgrade Guides
+  - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md)
+  - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md)
+  - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md)
+  - [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md)
+  - [Upgrade to v21.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-21.0.md)
diff --git a/docs/upgrades.md b/docs/UPGRADE-17.0.md
similarity index 90%
rename from docs/upgrades.md
rename to docs/UPGRADE-17.0.md
index 88d29ae1bf..2511cf4b2d 100644
--- a/docs/upgrades.md
+++ b/docs/UPGRADE-17.0.md
@@ -6,11 +6,14 @@ In this release, we now decided to remove random_pet resources in Managed Node G
 
 1. Run `terraform apply` with the module version v16.2.0
 2. Get your worker group names
+
 ```shell
 ~ terraform state show 'module.eks.module.node_groups.aws_eks_node_group.workers["example"]' | grep node_group_name
 node_group_name = "test-eks-mwIwsvui-example-sincere-squid"
 ```
+
 3. Upgrade your module and configure your node groups to use existing names
+
 ```hcl
 module "eks" {
   source  = "terraform-aws-modules/eks/aws"
@@ -30,7 +33,8 @@ module "eks" {
   # ...
 }
 ```
-4. Run `terraform plan`, you shoud see that only `random_pets` will be destroyed
+
+4. Run `terraform plan`, you should see that only `random_pets` will be destroyed
 
 ```shell
 Terraform will perform the following actions:
@@ -55,6 +59,7 @@ Terraform will perform the following actions:
 
 Plan: 0 to add, 0 to change, 1 to destroy.
 ```
+
 5. If everything sounds good to you, run `terraform apply`
 
-After the first apply, we recommand you to create a new node group and let the module use the `node_group_name_prefix` (by removing the `name` argument) to generate names and avoid collision during node groups re-creation if needed, because the lifce cycle is `create_before_destroy = true`.
+After the first apply, we recommend creating a new node group and letting the module use `node_group_name_prefix` (by removing the `name` argument) to generate names and avoid collisions during node group re-creation, because the lifecycle is `create_before_destroy = true`.
diff --git a/docs/UPGRADE-18.0.md b/docs/UPGRADE-18.0.md
new file mode 100644
index 0000000000..8b3d0accad
--- /dev/null
+++ b/docs/UPGRADE-18.0.md
@@ -0,0 +1,764 @@
+# Upgrade from v17.x to v18.x
+
+Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.
+
+Note: please see https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1744 where users have shared the steps/changes that have worked for their configurations to upgrade. Due to the numerous configuration possibilities, it is difficult to capture specific steps that will work for all; this has proven to be a useful thread to share collective information from the broader community regarding v18.x upgrades.
+
+For most users, adding the following to your v17.x configuration will preserve the state of your cluster control plane when upgrading to v18.x:
+
+```hcl
+prefix_separator                   = ""
+iam_role_name                      = $CLUSTER_NAME
+cluster_security_group_name        = $CLUSTER_NAME
+cluster_security_group_description = "EKS cluster security group."
+```
+
+This configuration assumes that [`create_iam_role`](https://github.com/terraform-aws-modules/terraform-aws-eks#input_create_iam_role) is set to `true`, which is the default value.
+
+As the location of the Terraform state of the IAM role has been changed from 17.x to 18.x, you'll also have to move the state before running `terraform apply` by calling:
+
+```shell
+terraform state mv 'module.eks.aws_iam_role.cluster[0]' 'module.eks.aws_iam_role.this[0]'
+```
+
+See more information [here](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1744#issuecomment-1027359982).
+
+## List of backwards incompatible changes
+
+- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
+- Support for managing the aws-auth configmap has been removed. This change also removes the dependency on the Kubernetes Terraform provider, the local dependency on aws-iam-authenticator for users, and the reliance on the forked http provider to wait and poll on cluster creation. To aid users in this change, an output variable `aws_auth_configmap_yaml` has been provided which renders the aws-auth configmap necessary to support at least the IAM roles used by the module (additional mapRoles/mapUsers definitions are to be provided by users); a usage sketch follows this list
+- Support for managing kubeconfig and its associated `local_file` resources have been removed; users are able to use the awscli provided `aws eks update-kubeconfig --name <cluster_name>` to update their local kubeconfig as necessary
+- The terminology used in the module has been modified to reflect that used by the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html).
+  - [AWS EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), `eks_managed_node_groups`, was previously referred to as simply node group, `node_groups`
+  - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html), `self_managed_node_groups`, was previously referred to as worker group, `worker_groups`
+  - [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), `fargate_profiles`, remains unchanged in terms of naming and terminology
+- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are both used by the root `eks` module and available for individual, standalone consumption if desired; a standalone consumption sketch follows this list.
+  - The previous `node_groups` sub-module is now named `eks-managed-node-group` and provisions a single AWS EKS Managed Node Group per sub-module definition (previous version utilized `for_each` to create 0 or more node groups)
+    - Additional changes for the `eks-managed-node-group` sub-module over the previous `node_groups` module include:
+      - Variable name changes defined in section `Variable and output changes` below
+      - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+      - Support for nearly full control of the security group created, or provide the ID of an existing security group, has been added
+      - User data has been revamped and all user data logic moved to the `_user_data` internal sub-module; the local `userdata.sh.tpl` has been removed entirely
+  - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (previous version utilized `for_each` to create 0 or more profiles)
+    - Additional changes for the `fargate-profile` sub-module over the previous `fargate` module include:
+      - Variable name changes defined in section `Variable and output changes` below
+      - Support for nearly full control of the IAM role created, or provide the ARN of an existing IAM role, has been added
+      - Similar to the `eks_managed_node_group_defaults` and `self_managed_node_group_defaults`, a `fargate_profile_defaults` has been provided to allow users to control the default configurations for the Fargate profiles created
+  - A sub-module for `self-managed-node-group` has been created and provisions a single self managed node group (autoscaling group) per sub-module definition
+    - Additional changes for the `self-managed-node-group` sub-module over the previous `node_groups` variable include:
+      - The underlying autoscaling group and launch template have been updated to more closely match that of the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module and the features it offers
+      - The previous iteration used a count over a list of node group definitions which was prone to disruptive updates; this is now replaced with a map/for_each to align with that of the EKS managed node group and Fargate profile behaviors/style
+- The user data configuration supported across the module has been completely revamped. A new `_user_data` internal sub-module has been created to consolidate all user data configuration in one location, which provides better support for testability (via the [`tests/user-data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/tests/user-data) example). The new sub-module supports nearly all possible combinations, including the ability for users to provide their own user data template which will be rendered by the module. See the `tests/user-data` example project for the full range of example configuration possibilities; more details on the logic of the design can be found in the [`modules/_user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data_) directory.
+- Resource name changes may cause issues with existing resources. For example, security groups and IAM roles cannot be renamed, they must be recreated. Recreation of these resources may also trigger a recreation of the cluster. To use the legacy (< 18.x) resource naming convention, set `prefix_separator` to "".
+- Security group usage has been overhauled to provide only the bare minimum network connectivity required to launch a bare bones cluster. See the [security group documentation section](https://github.com/terraform-aws-modules/terraform-aws-eks#security-groups) for more details. Users upgrading to v18.x will want to review the rules they have in place today versus the rules provisioned by the v18.x module and ensure to make any necessary adjustments for their specific workload.
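+
+Since the module no longer manages the `aws-auth` configmap, one possible approach (a sketch, not the module's prescribed method) is to write the `aws_auth_configmap_yaml` output to disk and apply it out-of-band with `kubectl`; the file path and local-exec workflow below are assumptions, not module requirements:
+
+```hcl
+resource "local_file" "aws_auth" {
+  content  = module.eks.aws_auth_configmap_yaml
+  filename = "${path.module}/aws-auth.yaml" # hypothetical output path
+}
+
+resource "null_resource" "apply_aws_auth" {
+  # re-run whenever the rendered configmap changes
+  triggers = {
+    configmap = module.eks.aws_auth_configmap_yaml
+  }
+
+  provisioner "local-exec" {
+    # assumes kubectl is installed and kubeconfig already points at the cluster
+    command = "kubectl apply -f ${local_file.aws_auth.filename}"
+  }
+}
+```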
+
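+As a rough sketch of consuming one of the standalone sub-modules directly (the cluster name, subnets, and sizes below are illustrative, and the full set of required inputs depends on your module version):
+
+```hcl
+module "standalone_node_group" {
+  source  = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+  version = "~> 18.0"
+
+  name            = "standalone"
+  cluster_name    = "my-cluster" # hypothetical cluster name
+  cluster_version = "1.21"
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a"] # hypothetical subnet IDs
+
+  min_size     = 1
+  max_size     = 3
+  desired_size = 1
+}
+```
+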
+## Additional changes
+
+### Added
+
+- Support for AWS EKS Addons has been added
+- Support for AWS EKS Cluster Identity Provider Configuration has been added
+- AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and additional resources supported
+- An example `user_data` project has been added to aid in demonstrating, testing, and validating the various methods of configuring user data with the `_user_data` sub-module as well as the root `eks` module
+- Template for rendering the aws-auth configmap output - `aws_auth_cm.tpl`
+- Template for Bottlerocket OS user data bootstrapping - `bottlerocket_user_data.tpl`
+
+### Modified
+
+- The previous `fargate` example has been renamed to `fargate_profile`
+- The previous `irsa` and `instance_refresh` examples have been merged into one example `irsa_autoscale_refresh`
+- The previous `managed_node_groups` example has been renamed to `eks_managed_node_group`
+- The previously hardcoded EKS OIDC root CA thumbprint value and variable has been replaced with a `tls_certificate` data source that refers to the cluster OIDC issuer url. Thumbprint values should remain unchanged however
+- Individual cluster security group resources have been replaced with a single security group resource that takes a map of rules as input. The default ingress/egress rules have had their scope reduced in order to provide the bare minimum of access to permit successful cluster creation and allow users to opt in to any additional network access as needed for a better security posture. This means the `0.0.0.0/0` egress rule has been removed and replaced with TCP/443 and TCP/10250 egress rules to the node group security group
+- The Linux/bash user data template has been updated to include the bare minimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes with provisions for providing additional user data and configurations; was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
+- The Windows user data template has been renamed from `userdata_windows.tpl` to `windows_user_data.tpl`
+
+### Removed
+
+- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Documentation related to configuring the AWS EKS cluster and the supporting infrastructure resources provided by the module remains in scope, while cluster internal configuration is out of scope for this project
+- The previous `bottlerocket` example has been removed in favor of demonstrating the use and configuration of Bottlerocket nodes via the respective `eks_managed_node_group` and `self_managed_node_group` examples
+- The previous `launch_template` and `launch_templates_with_managed_node_groups` examples have been removed; only launch templates are now supported (default) and launch configuration support has been removed
+- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples rendering this standalone example redundant
+- The additional, custom IAM role policy for the cluster role has been removed. The permissions are either now provided in the attached managed AWS permission policies used or are no longer required
+- The `kubeconfig.tpl` template; kubeconfig management is no longer supported under this module
+- The HTTP Terraform provider (forked copy) dependency has been removed
+
+### Variable and output changes
+
+1. Removed variables:
+
+    - `cluster_create_timeout`, `cluster_update_timeout`, and `cluster_delete_timeout` have been replaced with `cluster_timeouts`
+    - `kubeconfig_name`
+    - `kubeconfig_output_path`
+    - `kubeconfig_file_permission`
+    - `kubeconfig_api_version`
+    - `kubeconfig_aws_authenticator_command`
+    - `kubeconfig_aws_authenticator_command_args`
+    - `kubeconfig_aws_authenticator_additional_args`
+    - `kubeconfig_aws_authenticator_env_variables`
+    - `write_kubeconfig`
+    - `default_platform`
+    - `manage_aws_auth`
+    - `aws_auth_additional_labels`
+    - `map_accounts`
+    - `map_roles`
+    - `map_users`
+    - `fargate_subnets`
+    - `worker_groups_launch_template`
+    - `worker_security_group_id`
+    - `worker_ami_name_filter`
+    - `worker_ami_name_filter_windows`
+    - `worker_ami_owner_id`
+    - `worker_ami_owner_id_windows`
+    - `worker_additional_security_group_ids`
+    - `worker_sg_ingress_from_port`
+    - `workers_additional_policies`
+    - `worker_create_security_group`
+    - `worker_create_initial_lifecycle_hooks`
+    - `worker_create_cluster_primary_security_group_rules`
+    - `cluster_create_endpoint_private_access_sg_rule`
+    - `cluster_endpoint_private_access_cidrs`
+    - `cluster_endpoint_private_access_sg`
+    - `manage_worker_iam_resources`
+    - `workers_role_name`
+    - `attach_worker_cni_policy`
+    - `eks_oidc_root_ca_thumbprint`
+    - `create_fargate_pod_execution_role`
+    - `fargate_pod_execution_role_name`
+    - `cluster_egress_cidrs`
+    - `workers_egress_cidrs`
+    - `wait_for_cluster_timeout`
+    - EKS Managed Node Group sub-module (was `node_groups`)
+      - `default_iam_role_arn`
+      - `workers_group_defaults`
+      - `worker_security_group_id`
+      - `node_groups_defaults`
+      - `node_groups`
+      - `ebs_optimized_not_supported`
+    - Fargate profile sub-module (was `fargate`)
+      - `create_eks` and `create_fargate_pod_execution_role` have been replaced with simply `create`
+
+2. Renamed variables:
+
+    - `create_eks` -> `create`
+    - `subnets` -> `subnet_ids`
+    - `cluster_create_security_group` -> `create_cluster_security_group`
+    - `cluster_log_retention_in_days` -> `cloudwatch_log_group_retention_in_days`
+    - `cluster_log_kms_key_id` -> `cloudwatch_log_group_kms_key_id`
+    - `manage_cluster_iam_resources` -> `create_iam_role`
+    - `cluster_iam_role_name` -> `iam_role_name`
+    - `permissions_boundary` -> `iam_role_permissions_boundary`
+    - `iam_path` -> `iam_role_path`
+    - `pre_userdata` -> `pre_bootstrap_user_data`
+    - `additional_userdata` -> `post_bootstrap_user_data`
+    - `worker_groups` -> `self_managed_node_groups`
+    - `workers_group_defaults` -> `self_managed_node_group_defaults`
+    - `node_groups` -> `eks_managed_node_groups`
+    - `node_groups_defaults` -> `eks_managed_node_group_defaults`
+    - EKS Managed Node Group sub-module (was `node_groups`)
+      - `create_eks` -> `create`
+      - `worker_additional_security_group_ids` -> `vpc_security_group_ids`
+    - Fargate profile sub-module
+      - `fargate_pod_execution_role_name` -> `name`
+      - `create_fargate_pod_execution_role` -> `create_iam_role`
+      - `subnets` -> `subnet_ids`
+      - `iam_path` -> `iam_role_path`
+      - `permissions_boundary` -> `iam_role_permissions_boundary`
+
+3. Added variables:
+
+    - `cluster_additional_security_group_ids` added to allow users to add additional security groups to the cluster as needed
+    - `cluster_security_group_name`
+    - `cluster_security_group_use_name_prefix` added to allow users to use either the name as specified or default to using the name specified as a prefix
+    - `cluster_security_group_description`
+    - `cluster_security_group_additional_rules`
+    - `cluster_security_group_tags`
+    - `create_cloudwatch_log_group` added in place of the logic that checked if any cluster log types were enabled to allow users to opt in as they see fit
+    - `create_node_security_group` added to create single security group that connects node groups and cluster in central location
+    - `node_security_group_id`
+    - `node_security_group_name`
+    - `node_security_group_use_name_prefix`
+    - `node_security_group_description`
+    - `node_security_group_additional_rules`
+    - `node_security_group_tags`
+    - `iam_role_arn`
+    - `iam_role_use_name_prefix`
+    - `iam_role_description`
+    - `iam_role_additional_policies`
+    - `iam_role_tags`
+    - `cluster_addons`
+    - `cluster_identity_providers`
+    - `fargate_profile_defaults`
+    - `prefix_separator` added to support legacy behavior of not having a prefix separator
+    - EKS Managed Node Group sub-module (was `node_groups`)
+      - `platform`
+      - `enable_bootstrap_user_data`
+      - `pre_bootstrap_user_data`
+      - `post_bootstrap_user_data`
+      - `bootstrap_extra_args`
+      - `user_data_template_path`
+      - `create_launch_template`
+      - `launch_template_name`
+      - `launch_template_use_name_prefix`
+      - `description`
+      - `ebs_optimized`
+      - `ami_id`
+      - `key_name`
+      - `launch_template_default_version`
+      - `update_launch_template_default_version`
+      - `disable_api_termination`
+      - `kernel_id`
+      - `ram_disk_id`
+      - `block_device_mappings`
+      - `capacity_reservation_specification`
+      - `cpu_options`
+      - `credit_specification`
+      - `elastic_gpu_specifications`
+      - `elastic_inference_accelerator`
+      - `enclave_options`
+      - `instance_market_options`
+      - `license_specifications`
+      - `metadata_options`
+      - `enable_monitoring`
+      - `network_interfaces`
+      - `placement`
+      - `min_size`
+      - `max_size`
+      - `desired_size`
+      - `use_name_prefix`
+      - `ami_type`
+      - `ami_release_version`
+      - `capacity_type`
+      - `disk_size`
+      - `force_update_version`
+      - `instance_types`
+      - `labels`
+      - `cluster_version`
+      - `launch_template_version`
+      - `remote_access`
+      - `taints`
+      - `update_config`
+      - `timeouts`
+      - `create_security_group`
+      - `security_group_name`
+      - `security_group_use_name_prefix`
+      - `security_group_description`
+      - `vpc_id`
+      - `security_group_rules`
+      - `cluster_security_group_id`
+      - `security_group_tags`
+      - `create_iam_role`
+      - `iam_role_arn`
+      - `iam_role_name`
+      - `iam_role_use_name_prefix`
+      - `iam_role_path`
+      - `iam_role_description`
+      - `iam_role_permissions_boundary`
+      - `iam_role_additional_policies`
+      - `iam_role_tags`
+    - Fargate profile sub-module (was `fargate`)
+      - `iam_role_arn` (for if `create_iam_role` is `false` to bring your own externally created role)
+      - `iam_role_name`
+      - `iam_role_use_name_prefix`
+      - `iam_role_description`
+      - `iam_role_additional_policies`
+      - `iam_role_tags`
+      - `selectors`
+      - `timeouts`
+
+4. Removed outputs:
+
+    - `cluster_version`
+    - `kubeconfig`
+    - `kubeconfig_filename`
+    - `workers_asg_arns`
+    - `workers_asg_names`
+    - `workers_user_data`
+    - `workers_default_ami_id`
+    - `workers_default_ami_id_windows`
+    - `workers_launch_template_ids`
+    - `workers_launch_template_arns`
+    - `workers_launch_template_latest_versions`
+    - `worker_security_group_id`
+    - `worker_iam_instance_profile_arns`
+    - `worker_iam_instance_profile_names`
+    - `worker_iam_role_name`
+    - `worker_iam_role_arn`
+    - `fargate_profile_ids`
+    - `fargate_profile_arns`
+    - `fargate_iam_role_name`
+    - `fargate_iam_role_arn`
+    - `node_groups`
+    - `security_group_rule_cluster_https_worker_ingress`
+    - EKS Managed Node Group sub-module (was `node_groups`)
+      - `node_groups`
+      - `aws_auth_roles`
+    - Fargate profile sub-module (was `fargate`)
+      - `aws_auth_roles`
+
+5. Renamed outputs:
+
+    - `config_map_aws_auth` -> `aws_auth_configmap_yaml`
+    - Fargate profile sub-module (was `fargate`)
+      - `fargate_profile_ids` -> `fargate_profile_id`
+      - `fargate_profile_arns` -> `fargate_profile_arn`
+
+6. Added outputs:
+
+    - `cluster_platform_version`
+    - `cluster_status`
+    - `cluster_security_group_arn`
+    - `cluster_security_group_id`
+    - `node_security_group_arn`
+    - `node_security_group_id`
+    - `cluster_iam_role_unique_id`
+    - `cluster_addons`
+    - `cluster_identity_providers`
+    - `fargate_profiles`
+    - `eks_managed_node_groups`
+    - `self_managed_node_groups`
+    - EKS Managed Node Group sub-module (was `node_groups`)
+      - `launch_template_id`
+      - `launch_template_arn`
+      - `launch_template_latest_version`
+      - `node_group_arn`
+      - `node_group_id`
+      - `node_group_resources`
+      - `node_group_status`
+      - `security_group_arn`
+      - `security_group_id`
+      - `iam_role_name`
+      - `iam_role_arn`
+      - `iam_role_unique_id`
+    - Fargate profile sub-module (was `fargate`)
+      - `iam_role_unique_id`
+      - `fargate_profile_status`
+
+## Upgrade Migrations
+
+### Before 17.x Example
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 17.0"
+
+  cluster_name                    = local.name
+  cluster_version                 = local.cluster_version
+  cluster_endpoint_private_access = true
+  cluster_endpoint_public_access  = true
+
+  vpc_id  = module.vpc.vpc_id
+  subnets = module.vpc.private_subnets
+
+  # Managed Node Groups
+  node_groups_defaults = {
+    ami_type  = "AL2_x86_64"
+    disk_size = 50
+  }
+
+  node_groups = {
+    node_group = {
+      min_capacity     = 1
+      max_capacity     = 10
+      desired_capacity = 1
+
+      instance_types = ["t3.large"]
+      capacity_type  = "SPOT"
+
+      update_config = {
+        max_unavailable_percentage = 50
+      }
+
+      k8s_labels = {
+        Environment = "test"
+        GithubRepo  = "terraform-aws-eks"
+        GithubOrg   = "terraform-aws-modules"
+      }
+
+      taints = [
+        {
+          key    = "dedicated"
+          value  = "gpuGroup"
+          effect = "NO_SCHEDULE"
+        }
+      ]
+
+      additional_tags = {
+        ExtraTag = "example"
+      }
+    }
+  }
+
+  # Worker groups
+  worker_additional_security_group_ids = [aws_security_group.additional.id]
+
+  worker_groups_launch_template = [
+    {
+      name                    = "worker-group"
+      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+      spot_instance_pools     = 4
+      asg_max_size            = 5
+      asg_desired_capacity    = 2
+      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
+      public_ip               = true
+    },
+  ]
+
+  # Fargate
+  fargate_profiles = {
+    default = {
+      name = "default"
+      selectors = [
+        {
+          namespace = "kube-system"
+          labels = {
+            k8s-app = "kube-dns"
+          }
+        },
+        {
+          namespace = "default"
+        }
+      ]
+
+      tags = {
+        Owner = "test"
+      }
+
+      timeouts = {
+        create = "20m"
+        delete = "20m"
+      }
+    }
+  }
+
+  tags = {
+    Environment = "test"
+    GithubRepo  = "terraform-aws-eks"
+    GithubOrg   = "terraform-aws-modules"
+  }
+}
+```
+
+### After 18.x Example
+
+```hcl
+module "cluster_after" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 18.0"
+
+  cluster_name                    = local.name
+  cluster_version                 = local.cluster_version
+  cluster_endpoint_private_access = true
+  cluster_endpoint_public_access  = true
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  eks_managed_node_group_defaults = {
+    ami_type  = "AL2_x86_64"
+    disk_size = 50
+  }
+
+  eks_managed_node_groups = {
+    node_group = {
+      min_size     = 1
+      max_size     = 10
+      desired_size = 1
+
+      instance_types = ["t3.large"]
+      capacity_type  = "SPOT"
+
+      update_config = {
+        max_unavailable_percentage = 50
+      }
+
+      labels = {
+        Environment = "test"
+        GithubRepo  = "terraform-aws-eks"
+        GithubOrg   = "terraform-aws-modules"
+      }
+
+      taints = [
+        {
+          key    = "dedicated"
+          value  = "gpuGroup"
+          effect = "NO_SCHEDULE"
+        }
+      ]
+
+      tags = {
+        ExtraTag = "example"
+      }
+    }
+  }
+
+  self_managed_node_group_defaults = {
+    vpc_security_group_ids = [aws_security_group.additional.id]
+  }
+
+  self_managed_node_groups = {
+    worker_group = {
+      name = "worker-group"
+
+      min_size      = 1
+      max_size      = 5
+      desired_size  = 2
+      instance_type = "m4.large"
+
+      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+      block_device_mappings = {
+        xvda = {
+          device_name = "/dev/xvda"
+          ebs = {
+            delete_on_termination = true
+            encrypted             = false
+            volume_size           = 100
+            volume_type           = "gp2"
+          }
+
+        }
+      }
+
+      use_mixed_instances_policy = true
+      mixed_instances_policy = {
+        instances_distribution = {
+          spot_instance_pools = 4
+        }
+
+        override = [
+          { instance_type = "m5.large" },
+          { instance_type = "m5a.large" },
+          { instance_type = "m5d.large" },
+          { instance_type = "m5ad.large" },
+        ]
+      }
+    }
+  }
+
+  # Fargate
+  fargate_profiles = {
+    default = {
+      name = "default"
+
+      selectors = [
+        {
+          namespace = "kube-system"
+          labels = {
+            k8s-app = "kube-dns"
+          }
+        },
+        {
+          namespace = "default"
+        }
+      ]
+
+      tags = {
+        Owner = "test"
+      }
+
+      timeouts = {
+        create = "20m"
+        delete = "20m"
+      }
+    }
+  }
+
+  tags = {
+    Environment = "test"
+    GithubRepo  = "terraform-aws-eks"
+    GithubOrg   = "terraform-aws-modules"
+  }
+}
+```
+
+### Diff of before <> after
+
+```diff
+ module "eks" {
+   source  = "terraform-aws-modules/eks/aws"
+-  version = "~> 17.0"
++  version = "~> 18.0"
+
+   cluster_name                    = local.name
+   cluster_version                 = local.cluster_version
+   cluster_endpoint_private_access = true
+   cluster_endpoint_public_access  = true
+
+   vpc_id  = module.vpc.vpc_id
+-  subnets = module.vpc.private_subnets
++  subnet_ids = module.vpc.private_subnets
+
+-  # Managed Node Groups
+-  node_groups_defaults = {
++  eks_managed_node_group_defaults = {
+     ami_type  = "AL2_x86_64"
+     disk_size = 50
+   }
+
+-  node_groups = {
++  eks_managed_node_groups = {
+     node_group = {
+-      min_capacity     = 1
+-      max_capacity     = 10
+-      desired_capacity = 1
++      min_size     = 1
++      max_size     = 10
++      desired_size = 1
+
+       instance_types = ["t3.large"]
+       capacity_type  = "SPOT"
+
+       update_config = {
+         max_unavailable_percentage = 50
+       }
+
+-      k8s_labels = {
++      labels = {
+         Environment = "test"
+         GithubRepo  = "terraform-aws-eks"
+         GithubOrg   = "terraform-aws-modules"
+       }
+
+       taints = [
+         {
+           key    = "dedicated"
+           value  = "gpuGroup"
+           effect = "NO_SCHEDULE"
+         }
+       ]
+
+-      additional_tags = {
++      tags = {
+         ExtraTag = "example"
+       }
+     }
+   }
+
+-  # Worker groups
+-  worker_additional_security_group_ids = [aws_security_group.additional.id]
+-
+-  worker_groups_launch_template = [
+-    {
+-      name                    = "worker-group"
+-      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
+-      spot_instance_pools     = 4
+-      asg_max_size            = 5
+-      asg_desired_capacity    = 2
+-      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
+-      public_ip               = true
+-    },
+-  ]
++  self_managed_node_group_defaults = {
++    vpc_security_group_ids = [aws_security_group.additional.id]
++  }
++
++  self_managed_node_groups = {
++    worker_group = {
++      name = "worker-group"
++
++      min_size      = 1
++      max_size      = 5
++      desired_size  = 2
++      instance_type = "m4.large"
++
++      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
++
++      block_device_mappings = {
++        xvda = {
++          device_name = "/dev/xvda"
++          ebs = {
++            delete_on_termination = true
++            encrypted             = false
++            volume_size           = 100
++            volume_type           = "gp2"
++          }
++
++        }
++      }
++
++      use_mixed_instances_policy = true
++      mixed_instances_policy = {
++        instances_distribution = {
++          spot_instance_pools = 4
++        }
++
++        override = [
++          { instance_type = "m5.large" },
++          { instance_type = "m5a.large" },
++          { instance_type = "m5d.large" },
++          { instance_type = "m5ad.large" },
++        ]
++      }
++    }
++  }
+
+   # Fargate
+   fargate_profiles = {
+     default = {
+       name = "default"
+       selectors = [
+         {
+           namespace = "kube-system"
+           labels = {
+             k8s-app = "kube-dns"
+           }
+         },
+         {
+           namespace = "default"
+         }
+       ]
+
+       tags = {
+         Owner = "test"
+       }
+
+       timeouts = {
+         create = "20m"
+         delete = "20m"
+       }
+     }
+   }
+
+   tags = {
+     Environment = "test"
+     GithubRepo  = "terraform-aws-eks"
+     GithubOrg   = "terraform-aws-modules"
+   }
+ }
+
+```
+
+### Attaching an IAM role policy to a Fargate profile
+
+#### Before 17.x
+
+```hcl
+resource "aws_iam_role_policy_attachment" "default" {
+  role       = module.eks.fargate_iam_role_name
+  policy_arn = aws_iam_policy.default.arn
+}
+```
+
+#### After 18.x
+
+```hcl
+# Attach the policy to an "example" Fargate profile
+resource "aws_iam_role_policy_attachment" "default" {
+  role       = module.eks.fargate_profiles["example"].iam_role_name
+  policy_arn = aws_iam_policy.default.arn
+}
+```
+
+Or:
+
+```hcl
+# Attach the policy to all Fargate profiles
+resource "aws_iam_role_policy_attachment" "default" {
+  for_each = module.eks.fargate_profiles
+
+  role       = each.value.iam_role_name
+  policy_arn = aws_iam_policy.default.arn
+}
+```
diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md
new file mode 100644
index 0000000000..f626129be1
--- /dev/null
+++ b/docs/UPGRADE-19.0.md
@@ -0,0 +1,470 @@
+# Upgrade from v18.x to v19.x
+
+Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.
+
+## List of backwards incompatible changes
+
+- The `cluster_id` output used to output the name of the cluster. This is because the cluster name is a unique constraint and therefore it is set as the unique identifier within Terraform's state map. However, starting with local EKS clusters created on Outposts, there is now an attribute returned from the `aws eks create-cluster` API named `id`. The `cluster_id` has been updated to return this value, which means that for current, standard EKS clusters created in the AWS cloud, no value will be returned (at the time of this writing) for `cluster_id` and only local EKS clusters on Outposts will return a value that looks like a UUID/GUID. Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19 (see the first sketch after this list). [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560)
+- Minimum supported version of Terraform AWS provider updated to v4.45 to support the latest features provided via the resources utilized.
+- Minimum supported version of Terraform updated to v1.0
+- The individual security group created per EKS managed node group or self-managed node group has been removed. This configuration went mostly unused and would often cause confusion ("Why is there an empty security group attached to my nodes?"). This functionality can easily be replicated by users providing one or more externally created security groups to attach to nodes launched from the node group.
+- Previously, `var.iam_role_additional_policies` (one for each of the following: cluster IAM role, EKS managed node group IAM role, self-managed node group IAM role, and Fargate Profile IAM role) accepted a list of strings. This worked well for policies that already existed but failed for policies being created at the same time as the cluster due to the well-known issue of unknown values used in a `for_each` loop. To rectify this issue in `v19.x`, two changes were made:
+  1. `var.iam_role_additional_policies` was changed from type `list(string)` to type `map(string)` -> this is a breaking change. More information on managing this change can be found below, under `Terraform State Moves`; a before/after sketch also follows this list
+  2. The logic used in the root module for this variable was changed to replace the use of `try()` with `lookup()`. More details on why can be found [here](https://github.com/clowdhaus/terraform-for-each-unknown)
+- The cluster name has been removed from the Karpenter module event rule names. Due to the use of long cluster names appending to the provided naming scheme, the cluster name has moved to a `ClusterName` tag and the event rule name is now a prefix. This guarantees that users can have multiple instances of Karpenter with their respective event rules/SQS queue without name collisions, while also still being able to identify which queues and event rules belong to which cluster.
+- The new variable `node_security_group_enable_recommended_rules` is set to true by default and may conflict with any custom ingress/egress rules. Please ensure that any duplicates from the `node_security_group_additional_rules` are removed before upgrading, or set `node_security_group_enable_recommended_rules` to false. [Reference](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md#added)
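+
+A minimal sketch of the `cluster_id` to `cluster_name` switch described in the first item above (the data source is only an illustration of a downstream reference):
+
+```hcl
+data "aws_eks_cluster" "this" {
+  # v18.x: name = module.eks.cluster_id
+  name = module.eks.cluster_name # v19.x
+}
+```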
+
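+A before/after sketch of the `iam_role_additional_policies` type change (the policy ARN and map key are illustrative):
+
+```hcl
+# v18.x - list(string)
+iam_role_additional_policies = [
+  "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
+]
+
+# v19.x - map(string); the key is an arbitrary, static identifier
+iam_role_additional_policies = {
+  ssm = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+}
+```
+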
+## Additional changes
+
+### Added
+
+- Support for setting `preserve` as well as `most_recent` on addons (a configuration sketch follows this list).
+  - `preserve` indicates if you want to preserve the created resources when deleting the EKS add-on
+  - `most_recent` indicates if you want to use the most recent revision of the add-on or the default version (default)
+- Support for setting default node security group rules for common access patterns required:
+  - Egress all for `0.0.0.0/0`/`::/0`
+  - Ingress from cluster security group for 8443/TCP and 9443/TCP for common applications such as ALB Ingress Controller, Karpenter, OPA Gatekeeper, etc. These are commonly used as webhook ports for validating and mutating webhooks
+
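+A configuration sketch for the addon settings referenced above (addon names and values are illustrative):
+
+```hcl
+cluster_addons = {
+  coredns = {
+    most_recent = true # use the most recent revision instead of the default version
+  }
+  vpc-cni = {
+    preserve = true # keep the created resources when the EKS add-on is deleted
+  }
+}
+```
+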
+### Modified
+
+- `cluster_security_group_additional_rules` and `node_security_group_additional_rules` have been modified to use `lookup()` instead of `try()` to avoid the well-known issue of [unknown values within a `for_each` loop](https://github.com/hashicorp/terraform/issues/4149)
+- The default cluster security group rules no longer include egress rules for TCP/443 and TCP/10250 to node groups since the cluster primary security group includes a default rule for ALL to `0.0.0.0/0`/`::/0`
+- The default node security group egress rules have been removed since the default security group settings include an egress rule for ALL to `0.0.0.0/0`/`::/0`
+- `block_device_mappings` previously required a map of maps but has since changed to an array of maps. Users can remove the outer key for each block device mapping and replace the outermost map `{}` with an array `[]`. There are no state changes required for this change.
+- `create_kms_key` previously defaulted to `false` and now defaults to `true`. Clusters created with this module now default to enabling secret encryption by default with a customer-managed KMS key created by this module
+- `cluster_encryption_config` previously used a type of `list(any)` and now uses a type of `any` -> users can simply remove the outer `[`...`]` brackets on `v19.x`
+  - `cluster_encryption_config` previously defaulted to `[]` and now defaults to `{resources = ["secrets"]}` to encrypt secrets by default
+- `cluster_endpoint_public_access` previously defaulted to `true` and now defaults to `false`. Clusters created with this module now default to private-only access to the cluster endpoint
+  - `cluster_endpoint_private_access` previously defaulted to `false` and now defaults to `true`
+- The addon configuration now sets `"OVERWRITE"` as the default value for `resolve_conflicts` to ease add-on upgrade management. Users can opt out of this by instead setting `"NONE"` as the value for `resolve_conflicts`
+- The `kms` module used has been updated from `v1.0.2` to `v1.1.0` - no material changes other than updating to the latest version
+- The default value for EKS managed node group `update_config` has been updated to the recommended `{ max_unavailable_percentage = 33 }`
+- The default value for the self-managed node group `instance_refresh` has been updated to the recommended:
+    ```hcl
+    {
+      strategy = "Rolling"
+      preferences = {
+        min_healthy_percentage = 66
+      }
+    }
+    ```
+
+### Removed
+
+- Remove all references to `aws_default_tags` to avoid update conflicts; this is the responsibility of the provider and should be handled at the provider level
+  - https://github.com/terraform-aws-modules/terraform-aws-eks/issues?q=is%3Aissue+default_tags+is%3Aclosed
+  - https://github.com/terraform-aws-modules/terraform-aws-eks/pulls?q=is%3Apr+default_tags+is%3Aclosed
+
+### Variable and output changes
+
+1. Removed variables:
+
+   - `node_security_group_ntp_ipv4_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0`
+   - `node_security_group_ntp_ipv6_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0`
+   - Self-managed node groups:
+     - `create_security_group`
+     - `security_group_name`
+     - `security_group_use_name_prefix`
+     - `security_group_description`
+     - `security_group_rules`
+     - `security_group_tags`
+     - `cluster_security_group_id`
+     - `vpc_id`
+   - EKS managed node groups:
+     - `create_security_group`
+     - `security_group_name`
+     - `security_group_use_name_prefix`
+     - `security_group_description`
+     - `security_group_rules`
+     - `security_group_tags`
+     - `cluster_security_group_id`
+     - `vpc_id`
+
+2. Renamed variables:
+
+   - N/A
+
+3. Added variables:
+
+   - `provision_on_outpost` for Outposts support
+   - `outpost_config` for Outposts support
+   - `cluster_addons_timeouts` for setting a common set of timeouts for all addons (unless a specific value is provided within the addon configuration)
+   - `service_ipv6_cidr` for setting the IPv6 CIDR block for the Kubernetes service addresses
+   - `node_security_group_enable_recommended_rules` for enabling recommended node security group rules for common access patterns
+
+   - Self-managed node groups:
+     - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204)
+     - `maintenance_options`
+     - `private_dns_name_options`
+     - `instance_requirements`
+     - `context`
+     - `default_instance_warmup`
+     - `force_delete_warm_pool`
+   - EKS managed node groups:
+     - `use_custom_launch_template` was added to better clarify how users can switch between a custom launch template or the default launch template provided by the EKS managed node group. Previously, to achieve this same functionality of using the default launch template, users needed to set `create_launch_template = false` and `launch_template_name = ""` which is not very intuitive.
+     - `launch_template_id` for use when using an existing/externally created launch template (Ref: https://github.com/terraform-aws-modules/terraform-aws-autoscaling/pull/204)
+     - `maintenance_options`
+     - `private_dns_name_options`
+
+4. Removed outputs:
+
+   - Self-managed node groups:
+     - `security_group_arn`
+     - `security_group_id`
+   - EKS managed node groups:
+     - `security_group_arn`
+     - `security_group_id`
+
+5. Renamed outputs:
+
+   - `cluster_id` is not renamed but the value it returns is now different. For standard EKS clusters created in the AWS cloud, the value returned at the time of this writing is `null`/empty. For local EKS clusters created on Outposts, the value returned will look like a UUID/GUID. Users should switch all instances of `cluster_id` to use `cluster_name` before upgrading to v19. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560)
+
+6. Added outputs:
+
+   - `cluster_name` - The `cluster_id` currently set by the AWS provider is actually the cluster name, but in the future, this will change and there will be a distinction between the `cluster_name` and `cluster_id`. [Reference](https://github.com/hashicorp/terraform-provider-aws/issues/27560)
+
+## Upgrade Migrations
+
+1. Before upgrading your module definition to `v19.x`, please see below for both EKS managed node group(s) and self-managed node groups and remove the node group(s) security group prior to upgrading.
+
+### Self-Managed Node Groups
+
+Self-managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`.
+
+- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal.
+- For most users, the security group is not used and can be safely removed. However, existing instances will have the security group attached, and it must be disassociated before the security group can be deleted. Because instances are deployed via autoscaling groups, we cannot simply remove the security group from the code and have those changes reflected on the instances. Instead, we have to update the code and then trigger the autoscaling groups to cycle the deployed instances so that new instances are provisioned without the security group attached. You can utilize the `instance_refresh` parameter of autoscaling groups to force nodes to re-deploy when removing the security group, since changes to launch templates automatically trigger an instance refresh. An example configuration is provided below.
+  - Add the following to either `self_managed_node_group_defaults` or the individual self-managed node group definitions:
+    ```hcl
+    create_security_group = false
+    instance_refresh = {
+      strategy = "Rolling"
+      preferences = {
+        min_healthy_percentage = 66
+      }
+    }
+    ```
+- It is recommended to use the `aws-node-termination-handler` while performing this update. Please refer to the [`irsa-autoscale-refresh` example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/20af82846b4a1f23f3787a8c455f39c0b6164d80/examples/irsa_autoscale_refresh/charts.tf#L86) for usage. This will ensure that pods are safely evicted in a controlled manner to avoid service disruptions.
+- Once the necessary configurations are in place, you can apply the changes which will:
+  1. Create a new launch template (version) without the self-managed node group security group
+  2. Replace instances based on the `instance_refresh` configuration settings
+  3. New instances will launch without the self-managed node group security group, and prior instances will be terminated
+  4. Once the self-managed node group has cycled, the security group will be deleted
+
+### EKS Managed Node Groups
+
+EKS managed node groups on `v18.x` by default create a security group that does not specify any rules. In `v19.x`, this security group has been removed due to the predominant lack of usage (most users rely on the shared node security group). While still using version `v18.x` of your module definition, remove this security group from your node groups by setting `create_security_group = false`.
+
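+For example, while still on `v18.x`, this can be set in `eks_managed_node_group_defaults` (or per node group):
+
+```hcl
+  eks_managed_node_group_defaults = {
+    create_security_group = false
+  }
+```
+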
+- If you are currently utilizing this security group, it is recommended to create an additional security group that matches the rules/settings of the security group created by the node group, and specify that security group ID in `vpc_security_group_ids`. Once this is in place, you can proceed with the original security group removal.
+- EKS managed node groups rollout changes using a [rolling update strategy](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-update-behavior.html) that can be influenced through `update_config`. No additional changes are required for removing the security group created by node groups (unlike self-managed node groups which should utilize the `instance_refresh` setting of Autoscaling groups).
+- Once `create_security_group = false` has been set, you can apply the changes which will:
+  1. Create a new launch template (version) without the EKS managed node group security group
+  2. Replace instances based on the `update_config` configuration settings
+  3. New instances will launch without the EKS managed node group security group, and prior instances will be terminated
+  4. Once the EKS managed node group has cycled, the security group will be deleted
+
+2. Once the node group security group(s) have been removed, you can update your module definition to specify the `v19.x` version of the module
+3. Run `terraform init -upgrade=true` to update your configuration and pull in the v19 changes
+4. Using the documentation provided above, update your module definition to reflect the changes in the module from `v18.x` to `v19.x`. You can utilize `terraform plan` as you go to help highlight any changes that you wish to make. See below for `terraform state mv ...` commands related to the use of `iam_role_additional_policies`. If you are not providing any values to these variables, you can skip this section.
+5. Once you are satisfied with the changes and the `terraform plan` output, you can apply the changes to sync your infrastructure with the updated module definition (or vice versa).
+
+### Diff of Before (v18.x) vs After (v19.x)
+
+```diff
+ module "eks" {
+   source  = "terraform-aws-modules/eks/aws"
+-  version = "~> 18.0"
++  version = "~> 19.0"
+
+  cluster_name                    = local.name
++ cluster_endpoint_public_access  = true
+- cluster_endpoint_private_access = true # now the default
+
+  cluster_addons = {
+-   resolve_conflicts = "OVERWRITE" # now the default
++   preserve          = true
++   most_recent       = true
+
++   timeouts = {
++     create = "25m"
++     delete = "10m"
+    }
+    kube-proxy = {}
+    vpc-cni = {
+-     resolve_conflicts = "OVERWRITE" # now the default
+    }
+  }
+
+  # Encryption key
+  create_kms_key = true
+- cluster_encryption_config = [{
+-   resources = ["secrets"]
+- }]
++ cluster_encryption_config = {
++   resources = ["secrets"]
++ }
+  kms_key_deletion_window_in_days = 7
+  enable_kms_key_rotation         = true
+
+- iam_role_additional_policies = [aws_iam_policy.additional.arn]
++ iam_role_additional_policies = {
++   additional = aws_iam_policy.additional.arn
++ }
+
+  vpc_id                   = module.vpc.vpc_id
+  subnet_ids               = module.vpc.private_subnets
+  control_plane_subnet_ids = module.vpc.intra_subnets
+
+  # Extend node-to-node security group rules
+- node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"] # now the default
+  node_security_group_additional_rules = {
+-    ingress_self_ephemeral = {
+-      description = "Node to node ephemeral ports"
+-      protocol    = "tcp"
+-      from_port   = 0
+-      to_port     = 0
+-      type        = "ingress"
+-      self        = true
+-    }
+-    egress_all = {
+-      description      = "Node all egress"
+-      protocol         = "-1"
+-      from_port        = 0
+-      to_port          = 0
+-      type             = "egress"
+-      cidr_blocks      = ["0.0.0.0/0"]
+-      ipv6_cidr_blocks = ["::/0"]
+-    }
+  }
+
+  # Self-Managed Node Group(s)
+  self_managed_node_group_defaults = {
+    vpc_security_group_ids = [aws_security_group.additional.id]
+-   iam_role_additional_policies = [aws_iam_policy.additional.arn]
++   iam_role_additional_policies = {
++     additional = aws_iam_policy.additional.arn
++   }
+  }
+
+  self_managed_node_groups = {
+    spot = {
+      instance_type = "m5.large"
+      instance_market_options = {
+        market_type = "spot"
+      }
+
+      pre_bootstrap_user_data = <<-EOT
+        echo "foo"
+        export FOO=bar
+      EOT
+
+      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+      post_bootstrap_user_data = <<-EOT
+        cd /tmp
+        sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+        sudo systemctl enable amazon-ssm-agent
+        sudo systemctl start amazon-ssm-agent
+      EOT
+
+-     create_security_group          = true
+-     security_group_name            = "eks-managed-node-group-complete-example"
+-     security_group_use_name_prefix = false
+-     security_group_description     = "EKS managed node group complete example security group"
+-     security_group_rules = {}
+-     security_group_tags = {}
+    }
+  }
+
+  # EKS Managed Node Group(s)
+  eks_managed_node_group_defaults = {
+    ami_type       = "AL2_x86_64"
+    instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+
+    attach_cluster_primary_security_group = true
+    vpc_security_group_ids                = [aws_security_group.additional.id]
+-   iam_role_additional_policies = [aws_iam_policy.additional.arn]
++   iam_role_additional_policies = {
++     additional = aws_iam_policy.additional.arn
++   }
+  }
+
+  eks_managed_node_groups = {
+    blue = {}
+    green = {
+      min_size     = 1
+      max_size     = 10
+      desired_size = 1
+
+      instance_types = ["t3.large"]
+      capacity_type  = "SPOT"
+      labels = {
+        Environment = "test"
+        GithubRepo  = "terraform-aws-eks"
+        GithubOrg   = "terraform-aws-modules"
+      }
+
+      taints = {
+        dedicated = {
+          key    = "dedicated"
+          value  = "gpuGroup"
+          effect = "NO_SCHEDULE"
+        }
+      }
+
+      update_config = {
+        max_unavailable_percentage = 33 # or set `max_unavailable`
+      }
+
+-     create_security_group          = true
+-     security_group_name            = "eks-managed-node-group-complete-example"
+-     security_group_use_name_prefix = false
+-     security_group_description     = "EKS managed node group complete example security group"
+-     security_group_rules = {}
+-     security_group_tags = {}
+
+      tags = {
+        ExtraTag = "example"
+      }
+    }
+  }
+
+  # Fargate Profile(s)
+  fargate_profile_defaults = {
+-   iam_role_additional_policies = [aws_iam_policy.additional.arn]
++   iam_role_additional_policies = {
++     additional = aws_iam_policy.additional.arn
++   }
+  }
+
+  fargate_profiles = {
+    default = {
+      name = "default"
+      selectors = [
+        {
+          namespace = "kube-system"
+          labels = {
+            k8s-app = "kube-dns"
+          }
+        },
+        {
+          namespace = "default"
+        }
+      ]
+
+      tags = {
+        Owner = "test"
+      }
+
+      timeouts = {
+        create = "20m"
+        delete = "20m"
+      }
+    }
+  }
+
+  # OIDC Identity provider
+  cluster_identity_providers = {
+    cognito = {
+      client_id      = "702vqsrjicklgb7c5b7b50i1gc"
+      issuer_url     = "https://cognito-idp.us-west-2.amazonaws.com/us-west-2_re1u6bpRA"
+      username_claim = "email"
+      groups_claim   = "cognito:groups"
+      groups_prefix  = "gid:"
+    }
+  }
+
+  # aws-auth configmap
+  manage_aws_auth_configmap = true
+
+  aws_auth_node_iam_role_arns_non_windows = [
+    module.eks_managed_node_group.iam_role_arn,
+    module.self_managed_node_group.iam_role_arn,
+  ]
+  aws_auth_fargate_profile_pod_execution_role_arns = [
+    module.fargate_profile.fargate_profile_pod_execution_role_arn
+  ]
+
+  aws_auth_roles = [
+    {
+      rolearn  = "arn:aws:iam::66666666666:role/role1"
+      username = "role1"
+      groups   = ["system:masters"]
+    },
+  ]
+
+  aws_auth_users = [
+    {
+      userarn  = "arn:aws:iam::66666666666:user/user1"
+      username = "user1"
+      groups   = ["system:masters"]
+    },
+    {
+      userarn  = "arn:aws:iam::66666666666:user/user2"
+      username = "user2"
+      groups   = ["system:masters"]
+    },
+  ]
+
+  aws_auth_accounts = [
+    "777777777777",
+    "888888888888",
+  ]
+
+  tags = local.tags
+}
+```
+
+## Terraform State Moves
+
+The following Terraform state move commands are optional but recommended if you are providing additional IAM policies that are to be attached to IAM roles created by this module (cluster IAM role, node group IAM role, Fargate profile IAM role). Because the resources affected are `aws_iam_role_policy_attachment`, in theory, you could get away with simply applying the configuration and letting Terraform detach and re-attach the policies. However, during this brief period of update, you could experience permission failures as the policy is detached and re-attached, and therefore the state move route is recommended.
+
+Where `"<POLICY_ARN>"` is specified, this should be replaced with the full ARN of the policy, and `"<POLICY_MAP_KEY>"` should be replaced with the key used in the `iam_role_additional_policies` map for the associated policy. For example, if you have the following `v19.x` configuration:
+
+```hcl
+  ...
+  # This is demonstrating the cluster IAM role additional policies
+  iam_role_additional_policies = {
+    additional = aws_iam_policy.additional.arn
+  }
+  ...
+```
+
+The associated state move command would look similar to (albeit with your correct policy ARN):
+
+```sh
+terraform state mv 'module.eks.aws_iam_role_policy_attachment.this["arn:aws:iam::111111111111:policy/ex-complete-additional"]' 'module.eks.aws_iam_role_policy_attachment.additional["additional"]'
+```
+
+If you are not providing any additional IAM policies, no actions are required.
+
+### Cluster IAM Role
+
+Repeat for each policy provided in `iam_role_additional_policies`:
+
+```sh
+terraform state mv 'module.eks.aws_iam_role_policy_attachment.this["<POLICY_ARN>"]' 'module.eks.aws_iam_role_policy_attachment.additional["<POLICY_MAP_KEY>"]'
+```
+
+### EKS Managed Node Group IAM Role
+
+Where `"<NODE_GROUP_KEY>"` is the key used in the `eks_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either `eks_managed_node_group_defaults` or the individual node group definitions:
+
+```sh
+terraform state mv 'module.eks.module.eks_managed_node_group["<NODE_GROUP_KEY>"].aws_iam_role_policy_attachment.this["<POLICY_ARN>"]' 'module.eks.module.eks_managed_node_group["<NODE_GROUP_KEY>"].aws_iam_role_policy_attachment.additional["<POLICY_MAP_KEY>"]'
+```
+
+### Self-Managed Node Group IAM Role
+
+Where `"<NODE_GROUP_KEY>"` is the key used in the `self_managed_node_groups` map for the associated node group. Repeat for each policy provided in `iam_role_additional_policies` in either `self_managed_node_group_defaults` or the individual node group definitions:
+
+```sh
+terraform state mv 'module.eks.module.self_managed_node_group["<NODE_GROUP_KEY>"].aws_iam_role_policy_attachment.this["<POLICY_ARN>"]' 'module.eks.module.self_managed_node_group["<NODE_GROUP_KEY>"].aws_iam_role_policy_attachment.additional["<POLICY_MAP_KEY>"]'
+```
+
+### Fargate Profile IAM Role
+
+Where `"<FARGATE_PROFILE_KEY>"` is the key used in the `fargate_profiles` map for the associated profile. Repeat for each policy provided in `iam_role_additional_policies` in either `fargate_profile_defaults` or the individual profile definitions:
+
+```sh
+terraform state mv 'module.eks.module.fargate_profile["<FARGATE_PROFILE_KEY>"].aws_iam_role_policy_attachment.this["<POLICY_ARN>"]' 'module.eks.module.fargate_profile["<FARGATE_PROFILE_KEY>"].aws_iam_role_policy_attachment.additional["<POLICY_MAP_KEY>"]'
+```
diff --git a/docs/UPGRADE-20.0.md b/docs/UPGRADE-20.0.md
new file mode 100644
index 0000000000..629cf395b5
--- /dev/null
+++ b/docs/UPGRADE-20.0.md
@@ -0,0 +1,254 @@
+# Upgrade from v19.x to v20.x
+
+Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.
+
+## List of backwards incompatible changes
+
+- Minimum supported AWS provider version increased to `v5.34`
+- Minimum supported Terraform version increased to `v1.3` to support Terraform state `moved` blocks as well as other advanced features
+- The `resolve_conflicts` argument within the `cluster_addons` configuration has been replaced with `resolve_conflicts_on_create` and `resolve_conflicts_on_update` now that `resolve_conflicts` is deprecated (a short sketch of the new arguments follows this list)
+- The default/fallback value for the `preserve` argument of `cluster_addons` is now set to `true`. This has proven useful when deprovisioning clusters, avoiding the situation where the CNI is deleted too early and leaves orphaned resources that result in conflicts.
+- The Karpenter sub-module's use of the `irsa` naming convention has been removed, along with an update to the Karpenter controller IAM policy to align with Karpenter's `v1beta1`/`v0.32` changes. Instead of referring to the role as `irsa` or `pod_identity`, it is simply an IAM role used by the Karpenter controller, with support for either IRSA and/or Pod Identity (the default) at this time
+- The `aws-auth` ConfigMap resources have been moved to a standalone sub-module. This removes the Kubernetes provider requirement from the main module and allows for the `aws-auth` ConfigMap to be managed independently of the main module. This sub-module will be removed entirely in the next major release.
+- Support for cluster access management has been added with the default authentication mode set as `API_AND_CONFIG_MAP`. The `CONFIG_MAP` authentication mode on its own is no longer supported; instead, you will need to use `API_AND_CONFIG_MAP` at minimum
+- The Karpenter EventBridge rule key `spot_interrupt` has been updated to correct its spelling (previously `spot_interupt`). This will cause the rule to be replaced
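+
+As a brief sketch of the new addon arguments (the addon name and values shown here are illustrative; valid values come from the EKS addon API):
+
+```hcl
+  cluster_addons = {
+    coredns = {
+      resolve_conflicts_on_create = "OVERWRITE"
+      resolve_conflicts_on_update = "PRESERVE"
+    }
+  }
+```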
+
+### ⚠️ Upcoming Changes Planned in v21.0 ⚠️
+
+To give users advanced notice and provide some future direction for this module, these are the following changes we will be looking to make in the next major release of this module:
+
+1. The `aws-auth` sub-module will be removed entirely from the project. Since this sub-module is captured in the v20.x releases, users can continue using it even after the module moves forward with the next major version. The long-term strategy and direction is cluster access entries and relying only on the AWS Terraform provider.
+2. The default value for `authentication_mode` will change to `API`. Aligning with point 1 above, this is a one-way change, but users are free to specify the value of their choosing in place of this default (when the change is made). This module will proceed with an EKS API first strategy.
+3. The launch template and autoscaling group usage contained within the EKS managed node group and self-managed node group sub-modules might be replaced with the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module. At minimum, it makes sense to replace most of the functionality in the self-managed node group module with this external module, but it is not yet clear whether there is any benefit to using it in the EKS managed node group sub-module. The interface that users interact with will stay the same; the changes will be internal to the implementation, and we will do everything we can to keep the disruption to a minimum.
+4. The `platform` variable will be replaced and instead `ami_type` will become the standard across both self-managed node group(s) and EKS managed node group(s). As EKS expands its portfolio of supported operating systems, the `ami_type` is better suited to associate the correct user data format to the respective OS. The `platform` variable is a legacy artifact of self-managed node groups but not as descriptive as the `ami_type`, and therefore it will be removed in favor of `ami_type`.
+
+## Additional changes
+
+### Added
+
+   - A module tag has been added to the cluster control plane
+   - Support for cluster access entries. The `bootstrap_cluster_creator_admin_permissions` setting on the control plane has been hardcoded to `false` since this is a one-time operation available only at cluster creation per the EKS API. Instead, users can enable/disable `enable_cluster_creator_admin_permissions` at any time to achieve the same functionality. This takes the identity that Terraform is using to make API calls and maps it into a cluster admin via an access entry; a minimal sketch is shown after this list. For users on existing clusters, you will need to remove the default cluster administrator that was created by EKS prior to the cluster access entry APIs - see the section [`Removing the default cluster administrator`](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for more details.
+   - Support for specifying the CloudWatch log group class (standard or infrequent access)
+   - Native support for Windows based managed node groups similar to AL2 and Bottlerocket
+   - Self-managed node groups now support `instance_maintenance_policy` and have added `max_healthy_percentage`, `scale_in_protected_instances`, and `standby_instances` arguments to the `instance_refresh.preferences` block
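+
+A minimal sketch of cluster access entries (the role ARN, map keys, and namespace below are hypothetical):
+
+```hcl
+  # Map the identity Terraform uses into a cluster admin via an access entry
+  enable_cluster_creator_admin_permissions = true
+
+  access_entries = {
+    # Hypothetical read-only entry for an existing IAM role
+    example_viewer = {
+      principal_arn = "arn:aws:iam::111111111111:role/example-viewer"
+
+      policy_associations = {
+        viewer = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+          access_scope = {
+            type       = "namespace"
+            namespaces = ["default"]
+          }
+        }
+      }
+    }
+  }
+```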
+
+### Modified
+
+   - For `sts:AssumeRole` permissions by services, the use of dynamically looking up the DNS suffix has been replaced with the static value of `amazonaws.com`. This does not appear to change by partition and instead requires users to set this manually for non-commercial regions.
+   - The default value for `kms_key_enable_default_policy` has changed from `false` to `true` to align with the default behavior of the `aws_kms_key` resource
+   - The Karpenter variable `create_instance_profile` default value has changed from `true` to `false` to align with the changes in Karpenter `v0.32`; starting with `v0.32.0`, Karpenter accepts an IAM role and creates the EC2 instance profile used by the nodes itself
+
+### Removed
+
+   - The `complete` example has been removed due to its redundancy with the other examples
+   - References to the IRSA sub-module in the IAM repository have been removed. Once https://github.com/clowdhaus/terraform-aws-eks-pod-identity has been updated and moved into the organization, the documentation here will be updated to mention the new module.
+
+### Variable and output changes
+
+1. Removed variables:
+
+   - `cluster_iam_role_dns_suffix` - replaced with a static string of `amazonaws.com`
+   - `manage_aws_auth_configmap`
+   - `create_aws_auth_configmap`
+   - `aws_auth_node_iam_role_arns_non_windows`
+   - `aws_auth_node_iam_role_arns_windows`
+   - `aws_auth_fargate_profile_pod_execution_role_arn`
+   - `aws_auth_roles`
+   - `aws_auth_users`
+   - `aws_auth_accounts`
+
+   - Karpenter
+      - `irsa_tag_key`
+      - `irsa_tag_values`
+      - `irsa_subnet_account_id`
+      - `enable_karpenter_instance_profile_creation`
+
+2. Renamed variables:
+
+   - Karpenter
+      - `create_irsa` -> `create_iam_role`
+      - `irsa_name` -> `iam_role_name`
+      - `irsa_use_name_prefix` -> `iam_role_name_prefix`
+      - `irsa_path` -> `iam_role_path`
+      - `irsa_description` -> `iam_role_description`
+      - `irsa_max_session_duration` -> `iam_role_max_session_duration`
+      - `irsa_permissions_boundary_arn` -> `iam_role_permissions_boundary_arn`
+      - `irsa_tags` -> `iam_role_tags`
+      - `policies` -> `iam_role_policies`
+      - `irsa_policy_name` -> `iam_policy_name`
+      - `irsa_ssm_parameter_arns` -> `ami_id_ssm_parameter_arns`
+      - `create_iam_role` -> `create_node_iam_role`
+      - `iam_role_additional_policies` -> `node_iam_role_additional_policies`
+      - `policies` -> `iam_role_policies`
+      - `iam_role_arn` -> `node_iam_role_arn`
+      - `iam_role_name` -> `node_iam_role_name`
+      - `iam_role_name_prefix` -> `node_iam_role_name_prefix`
+      - `iam_role_path` -> `node_iam_role_path`
+      - `iam_role_description` -> `node_iam_role_description`
+      - `iam_role_max_session_duration` -> `node_iam_role_max_session_duration`
+      - `iam_role_permissions_boundary_arn` -> `node_iam_role_permissions_boundary_arn`
+      - `iam_role_attach_cni_policy` -> `node_iam_role_attach_cni_policy`
+      - `iam_role_additional_policies` -> `node_iam_role_additional_policies`
+      - `iam_role_tags` -> `node_iam_role_tags`
+
+3. Added variables:
+
+   - `create_access_entry`
+   - `enable_cluster_creator_admin_permissions`
+   - `authentication_mode`
+   - `access_entries`
+   - `cloudwatch_log_group_class`
+
+   - Karpenter
+      - `iam_policy_name`
+      - `iam_policy_use_name_prefix`
+      - `iam_policy_description`
+      - `iam_policy_path`
+      - `enable_irsa`
+      - `create_access_entry`
+      - `access_entry_type`
+
+   - Self-managed node group
+      - `instance_maintenance_policy`
+      - `create_access_entry`
+      - `iam_role_arn`
+
+4. Removed outputs:
+
+   - `aws_auth_configmap_yaml`
+
+5. Renamed outputs:
+
+   - Karpenter
+      - `irsa_name` -> `iam_role_name`
+      - `irsa_arn` -> `iam_role_arn`
+      - `irsa_unique_id` -> `iam_role_unique_id`
+      - `role_name` -> `node_iam_role_name`
+      - `role_arn` -> `node_iam_role_arn`
+      - `role_unique_id` -> `node_iam_role_unique_id`
+
+6. Added outputs:
+
+   - `access_entries`
+
+   - Karpenter
+      - `node_access_entry_arn`
+
+   - Self-managed node group
+      - `access_entry_arn`
+
+## Upgrade Migrations
+
+### Diff of Before (v19.21) vs After (v20.0)
+
+```diff
+ module "eks" {
+   source  = "terraform-aws-modules/eks/aws"
+-  version = "~> 19.21"
++  version = "~> 20.0"
+
+# If you want to maintain the current default behavior of v19.x
++  kms_key_enable_default_policy = false
+
+-   manage_aws_auth_configmap = true
+
+-   aws_auth_roles = [
+-     {
+-       rolearn  = "arn:aws:iam::66666666666:role/role1"
+-       username = "role1"
+-       groups   = ["custom-role-group"]
+-     },
+-   ]
+
+-   aws_auth_users = [
+-     {
+-       userarn  = "arn:aws:iam::66666666666:user/user1"
+-       username = "user1"
+-       groups   = ["custom-users-group"]
+-     },
+-   ]
+}
+
++ module "eks_aws_auth" {
++   source  = "terraform-aws-modules/eks/aws//modules/aws-auth"
++   version = "~> 20.0"
+
++   manage_aws_auth_configmap = true
+
++   aws_auth_roles = [
++     {
++       rolearn  = "arn:aws:iam::66666666666:role/role1"
++       username = "role1"
++       groups   = ["custom-role-group"]
++     },
++   ]
+
++   aws_auth_users = [
++     {
++       userarn  = "arn:aws:iam::66666666666:user/user1"
++       username = "user1"
++       groups   = ["custom-users-group"]
++     },
++   ]
++ }
+```
+
+### Karpenter Diff of Before (v19.21) vs After (v20.0)
+
+```diff
+ module "eks_karpenter" {
+   source  = "terraform-aws-modules/eks/aws//modules/karpenter"
+-  version = "~> 19.21"
++  version = "~> 20.0"
+
+# If you wish to maintain the current default behavior of v19.x
++  enable_irsa             = true
++  create_instance_profile = true
+
+# To avoid any resource re-creation
++  iam_role_name          = "KarpenterIRSA-${module.eks.cluster_name}"
++  iam_role_description   = "Karpenter IAM role for service account"
++  iam_policy_name        = "KarpenterIRSA-${module.eks.cluster_name}"
++  iam_policy_description = "Karpenter IAM role for service account"
+}
+```
+
+## Terraform State Moves
+
+#### ⚠️ Authentication Mode Changes ⚠️
+
+Changing the `authentication_mode` is a one-way decision. See [announcement blog](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for further details:
+
+> Switching authentication modes on an existing cluster is a one-way operation. You can switch from CONFIG_MAP to API_AND_CONFIG_MAP. You can then switch from API_AND_CONFIG_MAP to API. You cannot revert these operations in the opposite direction. Meaning you cannot switch back to CONFIG_MAP or API_AND_CONFIG_MAP from API.
+
+> [!IMPORTANT]
+> If migrating to cluster access entries and you will NOT have any entries that remain in the `aws-auth` ConfigMap, you do not need to remove the configmap from the statefile. You can simply follow the migration guide and once access entries have been created, you can let Terraform remove/delete the `aws-auth` ConfigMap.
+>
+> If you WILL have entries that remain in the `aws-auth` ConfigMap, then you will need to remove the ConfigMap resources from the statefile to avoid any disruptions. When you add the new `aws-auth` sub-module and apply the changes, the sub-module will upsert the ConfigMap on the cluster. Provided the necessary entries are defined in that sub-module's definition, it will "re-adopt" the ConfigMap under Terraform's control.
+
+### authentication_mode = "API_AND_CONFIG_MAP"
+
+When using `authentication_mode = "API_AND_CONFIG_MAP"` and there are entries that will remain in the configmap (entries that cannot be replaced by cluster access entries), you will first need to update the `authentication_mode` on the cluster to `"API_AND_CONFIG_MAP"`. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR has been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed node groups and Fargate profiles; users do not need to do anything additional for these roles.
+
+Once the `authentication_mode` has been updated, next you will need to remove the configmap from the statefile to avoid any disruptions:
+
+> [!NOTE]
+> This is only required if there are entries that will remain in the `aws-auth` ConfigMap after migrating. Otherwise, you can skip this step and let Terraform destroy the ConfigMap.
+
+```sh
+terraform state rm 'module.eks.kubernetes_config_map_v1_data.aws_auth[0]'
+terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if Terraform created the original configmap
+```
+
+#### ℹ️ Terraform 1.7+ users
+
+If you are using Terraform `v1.7+`, you can utilize the [`removed`](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) block to facilitate the removal of the configmap through code. You can create a fork/clone of the provided [migration module](https://github.com/clowdhaus/terraform-aws-eks-migrate-v19-to-v20), add the `removed` blocks, and apply those changes before proceeding. We do not want to force users onto the bleeding edge with this module, so we have not included `removed` block support at this time.
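+
+For example, within such a fork/clone where the configmap resources are defined directly, the `removed` blocks might look like the following sketch (addresses are illustrative and should match your configuration):
+
+```hcl
+# Remove the configmap objects from state without deleting them from the cluster
+removed {
+  from = kubernetes_config_map_v1_data.aws_auth
+
+  lifecycle {
+    destroy = false
+  }
+}
+
+removed {
+  from = kubernetes_config_map.aws_auth
+
+  lifecycle {
+    destroy = false
+  }
+}
+```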
+
+Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see the before/after diff above). When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. In the before/after example above, the configmap would drop any entries for roles used by node groups and/or Fargate profiles, but maintain the custom entries for users and roles passed into the module definition.
+
+### authentication_mode = "API"
+
+In order to switch to `API` only (using cluster access entries exclusively), you first need to update the `authentication_mode` on the cluster to `API_AND_CONFIG_MAP` without modifying the `aws-auth` configmap. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR has been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed node groups and Fargate profiles; users do not need to do anything additional for these roles.
+
+Once the `authentication_mode` has been updated, you can update the `authentication_mode` on the cluster to `API` and remove the `aws-auth` configmap components.
diff --git a/docs/UPGRADE-21.0.md b/docs/UPGRADE-21.0.md
new file mode 100644
index 0000000000..695c8c80f2
--- /dev/null
+++ b/docs/UPGRADE-21.0.md
@@ -0,0 +1,328 @@
+# Upgrade from v20.x to v21.x
+
+If you have any questions regarding this upgrade process, please consult the [`examples`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples) directory:
+If you find a bug, please open an issue with supporting configuration to reproduce.
+
+## List of backwards incompatible changes
+
+- Terraform `v1.5.7` is now the minimum supported version
+- AWS provider `v6.0.0` is now the minimum supported version
+- TLS provider `v4.0.0` is now the minimum supported version
+- The `aws-auth` sub-module has been removed. Users who wish to continue utilizing its functionality can do so by pinning the sub-module to a `v20.x` version (e.g. a `~> 20.0` version constraint); a minimal pinned definition is sketched after this list.
+- `bootstrap_self_managed_addons` is now hardcoded to `false`. This is a legacy setting; instead, users should utilize the EKS addons API, which is what this module does by default. In conjunction with this change, the `bootstrap_self_managed_addons` argument is now ignored by the module to aid in upgrading without disruption (otherwise it would require cluster re-creation).
+- When enabling `enable_efa_support` or creating placement groups within a node group, users must now specify the correct `subnet_ids`; the module no longer tries to automatically select a suitable subnet.
+- EKS managed node group:
+    - IMDS now defaults to a hop limit of 1 (previously 2)
+    - `ami_type` now defaults to `AL2023_x86_64_STANDARD`
+    - `enable_monitoring` is now set to `false` by default
+    - `enable_efa_only` is now set to `true` by default
+    - `use_latest_ami_release_version` is now set to `true` by default
+    - Support for autoscaling group schedules has been removed
+- Self-managed node group:
+    - IMDS now defaults to a hop limit of 1 (previously 2)
+    - `ami_type` now defaults to `AL2023_x86_64_STANDARD`
+    - `enable_monitoring` is now set to `false` by default
+    - `enable_efa_only` is now set to `true` by default
+    - Support for autoscaling group schedules has been removed
+- Karpenter:
+    - Native support for IAM roles for service accounts (IRSA) has been removed; EKS Pod Identity is now enabled by default
+    - Karpenter controller policies for versions prior to Karpenter `v1` (i.e. `v0.33`) have been removed; the `v1` policy is now used by default
+    - `create_pod_identity_association` is now set to `true` by default
+- `addons.resolve_conflicts_on_create` is now set to `"NONE"` by default (was `"OVERWRITE"`).
+- `addons.most_recent` is now set to `true` by default (was `false`).
+- `cluster_identity_providers.issuer_url` is now required to be set by users; the prior incorrect default has been removed. See https://github.com/terraform-aws-modules/terraform-aws-eks/pull/3055 and https://github.com/kubernetes/kubernetes/pull/123561 for more details.
+- The OIDC issuer URL for IAM roles for service accounts (IRSA) has been changed to use the new dual-stack `oidc-eks` endpoint instead of `oidc.eks`. This is to align with https://github.com/aws/containers-roadmap/issues/2038#issuecomment-2278450601
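+
+A minimal pinned `aws-auth` sub-module definition might look like the following (role entries are illustrative):
+
+```hcl
+module "eks_aws_auth" {
+  source  = "terraform-aws-modules/eks/aws//modules/aws-auth"
+  version = "~> 20.0"
+
+  manage_aws_auth_configmap = true
+
+  aws_auth_roles = [
+    {
+      rolearn  = "arn:aws:iam::111111111111:role/example"
+      username = "example"
+      groups   = ["custom-role-group"]
+    },
+  ]
+}
+```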
+
+## Additional changes
+
+### Added
+
+- Support for the `region` parameter to specify the AWS region for the created resources, if different from the provider region.
+- Both the EKS managed and self-managed node groups now support creating their own security groups (again). This is primarily motivated by the changes for EFA support; previously, users would need to specify `enable_efa_support` both at the cluster level (to add the appropriate security group rules to the shared node security group) as well as at the node group level. However, it is not always desirable to have these rules across ALL node groups when they are really only required on the node group where EFA is utilized. Similarly, for other use cases, users can create custom rules for a specific node group instead of applying them across ALL node groups.
+
+### Modified
+
+- Variable definitions now contain detailed `object` types in place of the previously used `any` type.
+- The embedded KMS key module definition has been updated to `v4.0` to support the same version requirements as well as the new `region` argument.
+
+### Variable and output changes
+
+1. Removed variables:
+
+    - `enable_efa_support` - users only need to set this within the node group configuration, as the module no longer manages EFA support at the cluster level.
+    - `enable_security_groups_for_pods` - users can instead attach the `arn:aws:iam::aws:policy/AmazonEKSVPCResourceController` policy via `iam_role_additional_policies` if using security groups for pods.
+    - `eks-managed-node-group` sub-module
+        - `cluster_service_ipv4_cidr` - users should use `cluster_service_cidr` instead (for either IPv4 or IPv6).
+        - `elastic_gpu_specifications`
+        - `elastic_inference_accelerator`
+        - `platform` - this is superseded by `ami_type`
+        - `placement_group_strategy` - set to `cluster` by the module
+        - `placement_group_az` - users will need to specify the correct subnet in `subnet_ids`
+        - `create_schedule`
+        - `schedules`
+    - `self-managed-node-group` sub-module
+        - `elastic_gpu_specifications`
+        - `elastic_inference_accelerator`
+        - `platform` - this is superseded by `ami_type`
+        - `create_schedule`
+        - `schedules`
+        - `placement_group_az` - users will need to specify the correct subnet in `subnet_ids`
+        - `hibernation_options` - not valid in EKS
+        - `min_elb_capacity` - not valid in EKS
+        - `wait_for_elb_capacity` - not valid in EKS
+        - `wait_for_capacity_timeout` - not valid in EKS
+        - `default_cooldown` - not valid in EKS
+        - `target_group_arns` - not valid in EKS
+        - `service_linked_role_arn` - not valid in EKS
+        - `warm_pool` - not valid in EKS
+    - `fargate-profile` sub-module
+        - None
+    - `karpenter` sub-module
+        - `enable_v1_permissions` - v1 permissions are now the default
+        - `enable_irsa`
+        - `irsa_oidc_provider_arn`
+        - `irsa_namespace_service_accounts`
+        - `irsa_assume_role_condition_test`
+
+2. Renamed variables:
+
+    - Variables prefixed with `cluster_*` have been stripped of the prefix to better match the underlying API:
+        - `cluster_name` -> `name`
+        - `cluster_version` -> `kubernetes_version`
+        - `cluster_enabled_log_types` -> `enabled_log_types`
+        - `cluster_force_update_version` -> `force_update_version`
+        - `cluster_compute_config` -> `compute_config`
+        - `cluster_upgrade_policy` -> `upgrade_policy`
+        - `cluster_remote_network_config` -> `remote_network_config`
+        - `cluster_zonal_shift_config` -> `zonal_shift_config`
+        - `cluster_additional_security_group_ids` -> `additional_security_group_ids`
+        - `cluster_endpoint_private_access` -> `endpoint_private_access`
+        - `cluster_endpoint_public_access` -> `endpoint_public_access`
+        - `cluster_endpoint_public_access_cidrs` -> `endpoint_public_access_cidrs`
+        - `cluster_ip_family` -> `ip_family`
+        - `cluster_service_ipv4_cidr` -> `service_ipv4_cidr`
+        - `cluster_service_ipv6_cidr` -> `service_ipv6_cidr`
+        - `cluster_encryption_config` -> `encryption_config`
+        - `create_cluster_primary_security_group_tags` -> `create_primary_security_group_tags`
+        - `cluster_timeouts` -> `timeouts`
+        - `create_cluster_security_group` -> `create_security_group`
+        - `cluster_security_group_id` -> `security_group_id`
+        - `cluster_security_group_name` -> `security_group_name`
+        - `cluster_security_group_use_name_prefix` -> `security_group_use_name_prefix`
+        - `cluster_security_group_description` -> `security_group_description`
+        - `cluster_security_group_additional_rules` -> `security_group_additional_rules`
+        - `cluster_security_group_tags` -> `security_group_tags`
+        - `cluster_encryption_policy_use_name_prefix` -> `encryption_policy_use_name_prefix`
+        - `cluster_encryption_policy_name` -> `encryption_policy_name`
+        - `cluster_encryption_policy_description` -> `encryption_policy_description`
+        - `cluster_encryption_policy_path` -> `encryption_policy_path`
+        - `cluster_encryption_policy_tags` -> `encryption_policy_tags`
+        - `cluster_addons` -> `addons`
+        - `cluster_addons_timeouts` -> `addons_timeouts`
+        - `cluster_identity_providers` -> `identity_providers`
+    - `eks-managed-node-group` sub-module
+        - `cluster_version` -> `kubernetes_version`
+    - `self-managed-node-group` sub-module
+        - `cluster_version` -> `kubernetes_version`
+        - `delete_timeout` -> `timeouts`
+    - `fargate-profile` sub-module
+        - None
+    - `karpenter` sub-module
+        - None
+
+3. Added variables:
+
+    - `region`
+    - `eks-managed-node-group` sub-module
+        - `region`
+        - `partition` - added to reduce number of `GET` requests from data sources when possible
+        - `account_id` - added to reduce number of `GET` requests from data sources when possible
+        - `create_security_group`
+        - `security_group_name`
+        - `security_group_use_name_prefix`
+        - `security_group_description`
+        - `security_group_ingress_rules`
+        - `security_group_egress_rules`
+        - `security_group_tags`
+    - `self-managed-node-group` sub-module
+        - `region`
+        - `partition` - added to reduce number of `GET` requests from data sources when possible
+        - `account_id` - added to reduce number of `GET` requests from data sources when possible
+        - `create_security_group`
+        - `security_group_name`
+        - `security_group_use_name_prefix`
+        - `security_group_description`
+        - `security_group_ingress_rules`
+        - `security_group_egress_rules`
+        - `security_group_tags`
+    - `fargate-profile` sub-module
+        - `region`
+        - `partition` - added to reduce number of `GET` requests from data sources when possible
+        - `account_id` - added to reduce number of `GET` requests from data sources when possible
+    - `karpenter` sub-module
+        - `region`
+
+4. Removed outputs:
+
+    - `eks-managed-node-group` sub-module
+        - `platform` - this is superseded by `ami_type`
+        - `autoscaling_group_schedule_arns`
+    - `self-managed-node-group` sub-module
+        - `platform` - this is superseded by `ami_type`
+        - `autoscaling_group_schedule_arns`
+    - `fargate-profile` sub-module
+        - None
+    - `karpenter` sub-module
+        - None
+
+5. Renamed outputs:
+
+    - `eks-managed-node-group` sub-module
+        - None
+    - `self-managed-node-group` sub-module
+        - None
+    - `fargate-profile` sub-module
+        - None
+    - `karpenter` sub-module
+        - None
+
+6. Added outputs:
+
+    - `eks-managed-node-group` sub-module
+        - `security_group_arn`
+        - `security_group_id`
+    - `self-managed-node-group` sub-module
+        - `security_group_arn`
+        - `security_group_id`
+    - `fargate-profile` sub-module
+        - None
+    - `karpenter` sub-module
+        - None
+
+## Upgrade Migrations
+
+### Before 20.x Example
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 20.0"
+
+  # Truncated for brevity ...
+  # Renamed variables are not shown here, please refer to the full list above.
+
+  enable_efa_support = true
+
+  eks_managed_node_group_defaults = {
+    iam_role_additional_policies = {
+      AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+    }
+  }
+
+  eks_managed_node_groups = {
+    efa = {
+      ami_type       = "AL2023_x86_64_NVIDIA"
+      instance_types = ["p5e.48xlarge"]
+
+      enable_efa_support = true
+      enable_efa_only    = true
+    }
+  }
+
+  self_managed_node_groups = {
+    example = {
+      use_mixed_instances_policy = true
+      mixed_instances_policy = {
+        instances_distribution = {
+          on_demand_base_capacity                  = 0
+          on_demand_percentage_above_base_capacity = 0
+          on_demand_allocation_strategy            = "lowest-price"
+          spot_allocation_strategy                 = "price-capacity-optimized"
+        }
+
+        # ASG configuration
+        override = [
+          {
+            instance_requirements = {
+              cpu_manufacturers                           = ["intel"]
+              instance_generations                        = ["current", "previous"]
+              spot_max_price_percentage_over_lowest_price = 100
+
+              vcpu_count = {
+                min = 1
+              }
+
+              allowed_instance_types = ["t*", "m*"]
+            }
+          }
+        ]
+      }
+    }
+  }
+}
+```
+
+### After 21.x Example
+
+```hcl
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  # Truncated for brevity ...
+  # Renamed variables are not shown here, please refer to the full list above.
+
+  eks_managed_node_groups = {
+    efa = {
+      ami_type       = "AL2023_x86_64_NVIDIA"
+      instance_types = ["p5e.48xlarge"]
+
+      iam_role_additional_policies = {
+        AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+      }
+
+      enable_efa_support = true
+
+      # Must now be specified explicitly when enabling EFA support (single subnet/AZ)
+      subnet_ids = [element(module.vpc.private_subnets, 0)]
+    }
+  }
+
+  self_managed_node_groups = {
+    example = {
+      use_mixed_instances_policy = true
+      mixed_instances_policy = {
+        instances_distribution = {
+          on_demand_base_capacity                  = 0
+          on_demand_percentage_above_base_capacity = 0
+          on_demand_allocation_strategy            = "lowest-price"
+          spot_allocation_strategy                 = "price-capacity-optimized"
+        }
+
+        # ASG configuration
+        # Need to wrap in `launch_template` now
+        launch_template = {
+          override = [
+            {
+              instance_requirements = {
+                cpu_manufacturers                           = ["intel"]
+                instance_generations                        = ["current", "previous"]
+                spot_max_price_percentage_over_lowest_price = 100
+
+                vcpu_count = {
+                  min = 1
+                }
+
+                allowed_instance_types = ["t*", "m*"]
+              }
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+### State Changes
+
+No state changes required.
diff --git a/docs/assets/logo.png b/docs/assets/logo.png
new file mode 100644
index 0000000000..cdfb9fc545
Binary files /dev/null and b/docs/assets/logo.png differ
diff --git a/docs/assets/terraform-aws.png b/docs/assets/terraform-aws.png
new file mode 100644
index 0000000000..06974642a2
Binary files /dev/null and b/docs/assets/terraform-aws.png differ
diff --git a/docs/autoscaling.md b/docs/autoscaling.md
deleted file mode 100644
index c5ba615433..0000000000
--- a/docs/autoscaling.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# Autoscaling
-
-To enable worker node autoscaling you will need to do a few things:
-
-- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group
-- Install the cluster-autoscaler
-- Give the cluster-autoscaler access via an IAM policy
-
-It's probably easiest to follow the example in [examples/irsa](../examples/irsa), this will install the cluster-autoscaler using [Helm](https://helm.sh/) and use IRSA to attach a policy.
-
-If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example terraform code for the policy:
-
-```hcl
-resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
-  policy_arn = aws_iam_policy.worker_autoscaling.arn
-  role       = module.my_cluster.worker_iam_role_name[0]
-}
-
-resource "aws_iam_policy" "worker_autoscaling" {
-  name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
-  description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
-  policy      = data.aws_iam_policy_document.worker_autoscaling.json
-  path        = var.iam_path
-  tags        = var.tags
-}
-
-data "aws_iam_policy_document" "worker_autoscaling" {
-  statement {
-    sid    = "eksWorkerAutoscalingAll"
-    effect = "Allow"
-
-    actions = [
-      "autoscaling:DescribeAutoScalingGroups",
-      "autoscaling:DescribeAutoScalingInstances",
-      "autoscaling:DescribeLaunchConfigurations",
-      "autoscaling:DescribeTags",
-      "ec2:DescribeLaunchTemplateVersions",
-    ]
-
-    resources = ["*"]
-  }
-
-  statement {
-    sid    = "eksWorkerAutoscalingOwn"
-    effect = "Allow"
-
-    actions = [
-      "autoscaling:SetDesiredCapacity",
-      "autoscaling:TerminateInstanceInAutoScalingGroup",
-      "autoscaling:UpdateAutoScalingGroup",
-    ]
-
-    resources = ["*"]
-
-    condition {
-      test     = "StringEquals"
-      variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
-      values   = ["owned"]
-    }
-
-    condition {
-      test     = "StringEquals"
-      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
-      values   = ["true"]
-    }
-  }
-}
-```
-
-And example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):
-
-```yaml
-rbac:
-  create: true
-
-cloudProvider: aws
-awsRegion: YOUR_AWS_REGION
-
-autoDiscovery:
-  clusterName: YOUR_CLUSTER_NAME
-  enabled: true
-
-image:
-  repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
-  tag: v1.16.5
-```
-
-To install the chart, simply run helm with the `--values` option:
-
-```
-helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
-```
-
-## Notes
-
-There is a variable `asg_desired_capacity` given in the `local.tf` file, currently it can be used to change the desired worker(s) capacity in the autoscaling group but currently it is being ignored in terraform to reduce the [complexities](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/510#issuecomment-531700442) and the feature of scaling up and down the cluster nodes is being handled by the cluster autoscaler.
-
-The cluster autoscaler major and minor versions must match your cluster. For example if you are running a 1.16 EKS cluster set `image.tag=v1.16.5`. Search through their [releases page](https://github.com/kubernetes/autoscaler/releases) for valid version numbers.
diff --git a/docs/compute_resources.md b/docs/compute_resources.md
new file mode 100644
index 0000000000..e7dee7660c
--- /dev/null
+++ b/docs/compute_resources.md
@@ -0,0 +1,153 @@
+# Compute Resources
+
+## Table of Contents
+
+- [EKS Managed Node Groups](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md#eks-managed-node-groups)
+- [Self Managed Node Groups](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md#self-managed-node-groups)
+- [Fargate Profiles](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md#fargate-profiles)
+- [Default Configurations](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md#default-configurations)
+
+ℹ️ Only the pertinent attributes are shown below for brevity
+
+### EKS Managed Node Groups
+
+Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for service-related details.
+
+1. The module creates a custom launch template by default to ensure settings such as tags are propagated to instances. Please note that many of the customization options listed [here](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/eks-managed-node-group#Inputs) are only available when a custom launch template is created. To use the default template provided by the AWS EKS managed node group service, disable the launch template creation by setting `use_custom_launch_template` to `false`:
+
+```hcl
+  eks_managed_node_groups = {
+    default = {
+      use_custom_launch_template = false
+    }
+  }
+```
+
+2. Native support for Bottlerocket OS is provided by specifying the respective AMI type:
+
+```hcl
+  eks_managed_node_groups = {
+    bottlerocket_default = {
+      use_custom_launch_template = false
+
+      ami_type = "BOTTLEROCKET_x86_64"
+    }
+  }
+```
+
+3. Additional settings can be supplied to Bottlerocket OS in a similar manner. However, note that the user data for Bottlerocket OS uses the TOML format:
+
+```hcl
+  eks_managed_node_groups = {
+    bottlerocket_prepend_userdata = {
+      ami_type = "BOTTLEROCKET_x86_64"
+
+      bootstrap_extra_args = <<-EOT
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+      EOT
+    }
+  }
+```
+
+4. When using a custom AMI, the AWS EKS Managed Node Group service will NOT inject the necessary bootstrap script into the supplied user data. Users can elect to provide their own user data to bootstrap nodes and join the cluster, or opt in to use the module-provided user data:
+
+```hcl
+  eks_managed_node_groups = {
+    custom_ami = {
+      ami_id   = "ami-0caf35bc73450c396"
+      ami_type = "AL2023_x86_64_STANDARD"
+
+      # By default, EKS managed node groups will not append bootstrap script;
+      # this adds it back in using the default template provided by the module
+      # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+      enable_bootstrap_user_data = true
+
+      cloudinit_pre_nodeadm = [{
+        content      = <<-EOT
+          ---
+          apiVersion: node.eks.aws/v1alpha1
+          kind: NodeConfig
+          spec:
+            kubelet:
+              config:
+                shutdownGracePeriod: 30s
+        EOT
+        content_type = "application/node.eks.aws"
+      }]
+
+      # This is only possible when `ami_id` is specified, indicating a custom AMI
+      cloudinit_post_nodeadm = [{
+        content      = <<-EOT
+          echo "All done"
+        EOT
+        content_type = "text/x-shellscript; charset=\"us-ascii\""
+      }]
+    }
+  }
+```
+
+5. There is similar support for Bottlerocket OS:
+
+```hcl
+  eks_managed_node_groups = {
+    bottlerocket_custom_ami = {
+      ami_id   = "ami-0ff61e0bcfc81dc94"
+      ami_type = "BOTTLEROCKET_x86_64"
+
+      # use module user data template to bootstrap
+      enable_bootstrap_user_data = true
+      # this will get added to the template
+      bootstrap_extra_args = <<-EOT
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+
+        [settings.kubernetes.node-labels]
+        "label1" = "foo"
+        "label2" = "bar"
+
+        [settings.kubernetes.node-taints]
+        "dedicated" = "experimental:PreferNoSchedule"
+        "special" = "true:NoSchedule"
+      EOT
+    }
+  }
+```
+
+See the [`examples/eks-managed-node-group/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks-managed-node-group) for a working example of various configurations.
+
+### Self Managed Node Groups
+
+Refer to the [Self Managed Node Group documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) for service-related details.
+
+1. The `self-managed-node-group` sub-module uses the latest AWS EKS Optimized AMI (Linux) for the given Kubernetes version by default:
+
+```hcl
+  kubernetes_version = "1.33"
+
+  # This self managed node group will use the latest AWS EKS Optimized AMI for Kubernetes 1.33
+  self_managed_node_groups = {
+    default = {}
+  }
+```
+
+2. To use Bottlerocket, specify the `ami_type` as one of the respective `BOTTLEROCKET_*` types and supply a Bottlerocket OS AMI:
+
+```hcl
+  kubernetes_version = "1.33"
+
+  self_managed_node_groups = {
+    bottlerocket = {
+      ami_id   = data.aws_ami.bottlerocket_ami.id
+      ami_type = "BOTTLEROCKET_x86_64"
+    }
+  }
+```
+
+See the [`examples/self-managed-node-group/` example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self-managed-node-group) for a working example of various configurations.
+
+### Fargate Profiles
+
+Fargate profiles are straightforward to use and therefore no further details are provided here. See the [`tests/fargate-profile/` tests](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/tests/fargate-profile) for a working example of various configurations.
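+
+That said, a minimal profile sketch is shown below for orientation (the profile name and namespace are illustrative only):
+
+```hcl
+  fargate_profiles = {
+    example = {
+      selectors = [
+        { namespace = "default" }
+      ]
+    }
+  }
+```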
diff --git a/docs/enable-docker-bridge-network.md b/docs/enable-docker-bridge-network.md
deleted file mode 100644
index f6eb8ee11e..0000000000
--- a/docs/enable-docker-bridge-network.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Enable Docker Bridge Network
-
-The latest versions of the AWS EKS-optimized AMI disable the docker bridge network by default. To enable it, add the `bootstrap_extra_args` parameter to your worker group template.
-
-```hcl
-locals {
-  worker_groups = [
-    {
-      # Other parameters omitted for brevity
-      bootstrap_extra_args = "--enable-docker-bridge true"
-    }
-  ]
-}
-```
-
-Examples of when this would be necessary are:
-
-- You are running Continuous Integration in K8s, and building docker images by either mounting the docker sock as a volume or using docker in docker. Without the bridge enabled, internal routing from the inner container can't reach the outside world.
-
-## See More
-
-- [Docker in Docker no longer works without docker0 bridge](https://github.com/awslabs/amazon-eks-ami/issues/183)
-- [Add enable-docker-bridge bootstrap argument](https://github.com/awslabs/amazon-eks-ami/pull/187)
diff --git a/docs/faq.md b/docs/faq.md
index 3b9e118f6b..69d8c90e7f 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -1,193 +1,315 @@
 # Frequently Asked Questions
 
-## How do I customize X on the worker group's settings?
+- [Setting `disk_size` or `remote_access` does not make any changes](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#setting-disk_size-or-remote_access-does-not-make-any-changes)
+- [I received an error: `expect exactly one securityGroup tagged with kubernetes.io/cluster/<NAME> ...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-expect-exactly-one-securitygroup-tagged-with-kubernetesioclustername-)
+- [Why are nodes not being registered?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-nodes-not-being-registered)
+- [Why are there no changes when a node group's `desired_size` is modified?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-there-no-changes-when-a-node-groups-desired_size-is-modified)
+- [How do I access compute resource attributes?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#how-do-i-access-compute-resource-attributes)
+- [What add-ons are available?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#what-add-ons-are-available)
+- [What configuration values are available for an add-on?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#what-configuration-values-are-available-for-an-add-on)
 
-All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/local.tf) under `workers_group_defaults_defaults`.
+### Setting `disk_size` or `remote_access` does not make any changes
 
-Please open Issues or PRs if you think something is missing.
+`disk_size` and `remote_access` can only be set when using the EKS managed node group default launch template. This module defaults to providing a custom launch template to allow for custom security groups, tag propagation, etc. If you wish to forgo the custom launch template route, set `use_custom_launch_template = false`; you can then set `disk_size` and `remote_access`.
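+
+For example, a brief sketch (the key pair and security group references are placeholders):
+
+```hcl
+  eks_managed_node_groups = {
+    example = {
+      # Use the default launch template provided by the EKS managed node group service
+      use_custom_launch_template = false
+
+      disk_size = 50
+
+      remote_access = {
+        ec2_ssh_key               = "my-key-pair"                          # placeholder key pair name
+        source_security_group_ids = [aws_security_group.remote_access.id] # placeholder security group
+      }
+    }
+  }
+```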
 
-## Why are nodes not being registered?
+### I received an error: `expect exactly one securityGroup tagged with kubernetes.io/cluster/<CLUSTER_NAME> ...`
 
-### Networking
+⚠️ `<CLUSTER_NAME>` would be the name of your cluster
 
-Often caused by a networking or endpoint configuration issue.
+By default, the EKS service creates a cluster primary security group (outside of this module) and tags it with `{ "kubernetes.io/cluster/<CLUSTER_NAME>" = "owned" }`. On its own, this does not cause any conflicts for add-ons such as the AWS Load Balancer Controller until users decide to attach both the cluster primary security group and the shared node security group created by the module (by setting `attach_cluster_primary_security_group = true`). The issue is not having multiple security groups in your account with this tag key:value combination, but having multiple security groups with this tag key:value combination attached to nodes in the same cluster. There are a few ways to resolve this depending on your use case/intentions:
 
-At least one of the cluster public or private endpoints must be enabled in order for access to the cluster to work.
+1. If you want to use the cluster primary security group, you can disable the creation of the shared node security group with:
 
-Nodes need to be able to contact the EKS cluster endpoint. By default the module only creates a public endpoint. To access this endpoint the nodes need outgoing internet access:
-- Nodes in private subnets: via a NAT gateway or instance. This will need adding along with appropriate routing rules.
-- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.
-
-Cluster private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node calls to the endpoint stay within the VPC.
-
-When the private endpoint is enabled ensure that VPC DNS resolution and hostnames are also enabled:
-- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-module/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also has these variables.
-- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.
-
-Nodes need to be able to connect to other AWS services plus pull down container images from repos. If for some reason you cannot enable public internet access for nodes you can add VPC endpoints to the relevant services: EC2 API, ECR API, ECR DKR and S3.
-
-### `aws-auth` ConfigMap not present
-
-The module configures the `aws-auth` ConfigMap. This is used by the cluster to grant IAM users and roles RBAC permissions in the cluster, like the IAM role assigned to the worker nodes.
-
-Confirm that the ConfigMap matches the contents of the `config_map_aws_auth` module output. You can retrieve the live config by running the following in your terraform folder:
-`kubectl --kubeconfig=kubeconfig_* -n kube-system get cm aws-auth -o yaml`
-
-If the ConfigMap is missing or the contents are incorrect then ensure that you have properly configured the kubernetes provider block by referring to [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) and run `terraform apply` again.
-
-Users with `manage_aws_auth = false` will need to apply the ConfigMap themselves.
-
-## How can I work with the cluster if I disable the public endpoint?
-
-You have to interact with the cluster from within the VPC that it's associated with, from an instance that's allowed access via the cluster's security group.
-
-Creating a new cluster with the public endpoint disabled is harder to achieve. You will either want to pass in a pre-configured cluster security group or apply the `aws-auth` configmap in a separate action.
-
-## ConfigMap "aws-auth" already exists
-
-This can happen if the kubernetes provider has not been configured for use with the cluster. The kubernetes provider will be accessing your default kubernetes cluster which already has the map defined. Read [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details on how to configure the kubernetes provider correctly.
-
-Users upgrading from modules before 8.0.0 will need to import their existing aws-auth ConfigMap in to the terraform state. See 8.0.0's [CHANGELOG](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v8.0.0/CHANGELOG.md#v800---2019-12-11) for more details.
-
-## `Error: Get http://localhost/api/v1/namespaces/kube-system/configmaps/aws-auth: dial tcp 127.0.0.1:80: connect: connection refused`
-
-Usually this means that the kubernetes provider has not been configured, there is no default `~/.kube/config` and so the kubernetes provider is attempting to talk to localhost.
-
-You need to configure the kubernetes provider correctly. See [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details.
-
-## How can I stop Terraform from removing the EKS tags from my VPC and subnets?
+```hcl
+  create_node_security_group = false # default is true
+
+  eks_managed_node_groups = {
+    example = {
+      attach_cluster_primary_security_group = true # default is false
+    }
+  }
+  # Or for self-managed
+  self_managed_node_groups = {
+    example = {
+      attach_cluster_primary_security_group = true # default is false
+    }
+  }
+```
 
-You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
+2. By not attaching the cluster primary security group. The cluster primary security group has quite broad access; the module instead provides a security group with the minimum access needed to launch an empty EKS cluster successfully. Users are encouraged to open up access as necessary to support their workloads:
 
-An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore\_tags-configuration-block). However this can also cause terraform to display a perpetual difference.
+```hcl
+  eks_managed_node_groups = {
+    example = {
+      attach_cluster_primary_security_group = false # this is the default
+    }
+  }
+  # Or for self-managed
+  self_managed_node_groups = {
+    example = {
+      attach_cluster_primary_security_group = false # this is the default
+    }
+  }
+```
 
-## How do I safely remove old worker groups?
+In theory, if you are attaching the cluster primary security group, you shouldn't need to use the shared node security group created by the module. However, this is left up to users to decide for their requirements and use case.
 
-You've added new worker groups. Deleting worker groups from earlier in the list causes Terraform to want to recreate all worker groups. This is a limitation with how Terraform works and the module using `count` to create the ASGs and other resources.
+If you choose to use [Custom Networking](https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html), make sure to only attach the security groups matching your choice above in your ENIConfig resources. This will ensure you avoid redundant tags.
 
-The safest and easiest option is to set `asg_min_size` and `asg_max_size` to 0 on the worker groups to "remove".
+### Why are nodes not being registered?
 
-## Why does changing the worker group's desired count not do anything?
+Nodes not being able to register with the EKS control plane is generally due to networking misconfigurations.
 
-The module is configured to ignore this value. Unfortunately Terraform does not support variables within the `lifecycle` block.
+1. At least one of the cluster endpoints (public or private) must be enabled.
 
-The setting is ignored to allow the cluster autoscaler to work correctly and so that terraform apply does not accidentally remove running workers.
+If you require a public endpoint, it is recommended to enable both endpoints (public and private) and restrict the public endpoint via `cluster_endpoint_public_access_cidrs`. More information regarding communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).
 
-You can change the desired count via the CLI or console if you're not using the cluster autoscaler.
+2. Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:
 
-If you are not using autoscaling and really want to control the number of nodes via terraform then set the `asg_min_size` and `asg_max_size` instead. AWS will remove a random instance when you scale down. You will have to weigh the risks here.
+- Nodes in private subnets: via a NAT gateway or instance along with the appropriate routing rules
+- Nodes in public subnets: ensure that nodes are launched with public IPs (enable through either the module here or your subnet setting defaults)
 
-## Why are nodes not recreated when the `launch_configuration`/`launch_template` is recreated?
+**Important: If you only enable the public endpoint and restrict access via `cluster_endpoint_public_access_cidrs`, be aware that EKS nodes will also use the public endpoint, so you must allow access from the addresses the nodes egress through (e.g. NAT gateway addresses for nodes in private subnets). Otherwise, your nodes will fail to join the cluster.**
 
-By default the ASG is not configured to be recreated when the launch configuration or template changes. Terraform spins up new instances and then deletes all the old instances in one go as the AWS provider team have refused to implement rolling updates of autoscaling groups. This is not good for kubernetes stability.
+3. The private endpoint can also be enabled by setting `cluster_endpoint_private_access = true`. Ensure that VPC DNS resolution and hostnames are also enabled for your VPC when the private endpoint is enabled.
 
-You need to use a process to drain and cycle the workers.
+4. Nodes need to be able to connect to other AWS services to function (download container images, make API calls to assume roles, etc.). If for some reason you cannot enable public internet access for nodes, you can add VPC endpoints for the relevant services: EC2 API, ECR API, ECR DKR, and S3.
 
-You are not using the cluster autoscaler:
-- Add a new instance
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Terminate the old node instance. ASG will start a new instance
-- Repeat the drain and delete process until all old nodes are replaced
+### Why are there no changes when a node group's `desired_size` is modified?
 
-You are using the cluster autoscaler:
-- Drain an old node `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
-- Wait for pods to be Running
-- Cluster autoscaler will create new nodes when required
-- Repeat until all old nodes are drained
-- Cluster autoscaler will terminate the old nodes after 10-60 minutes automatically
+The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow autoscaling via controllers such as cluster autoscaler or Karpenter to work properly and without interference by Terraform. Changing the desired count must be handled outside of Terraform once the node group is created.
 
-Alternatively you can set the `asg_recreate_on_change = true` worker group option to get the ASG recreated after changes to the launch configuration or template. But be aware of the risks to cluster stability mentioned above.
+ℹ️ See [this repository](https://github.com/bryantbiggs/eks-desired-size-hack) for a workaround to this limitation.
 
-You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.
+### How do I access compute resource attributes?
 
-## How do I create kubernetes resources when creating the cluster?
+Examples of accessing the attributes of the compute resource(s) created by the root module are shown below. Note - the assumption is that your cluster module definition is named `eks` as in `module "eks" { ... }`:
 
-You do not need to do anything extra since v12.1.0 of the module as long as the following conditions are met:
-- `manage_aws_auth = true` on the module (default)
-- the kubernetes provider is correctly configured like in the [Usage Example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/README.md#usage-example). Primarily the module's `cluster_id` output is used as input to the `aws_eks_cluster*` data sources.
+- EKS Managed Node Group attributes
 
-The `cluster_id` depends on a `data.http.wait_for_cluster` that polls the EKS cluster's endpoint until it is alive. This blocks initialisation of the kubernetes provider.
+```hcl
+eks_managed_role_arns = [for group in module.eks.eks_managed_node_groups : group.iam_role_arn]
+```
 
-## `aws_auth.tf: At 2:14: Unknown token: 2:14 IDENT`
+- Self Managed Node Group attributes
 
-You are attempting to use a Terraform 0.12 module with Terraform 0.11.
+```hcl
+self_managed_role_arns = [for group in module.eks.self_managed_node_groups : group.iam_role_arn]
+```
 
-We highly recommend that you upgrade your EKS Terraform config to 0.12 to take advantage of new features in the module.
+- Fargate Profile attributes
 
-Alternatively you can lock your module to a compatible version if you must stay with terraform 0.11:
 ```hcl
-module "eks" {
-  source  = "terraform-aws-modules/eks/aws"
-  version = "~> 4.0"
-  # ...
-}
+fargate_profile_pod_execution_role_arns = [for group in module.eks.fargate_profiles : group.fargate_profile_pod_execution_role_arn]
 ```
 
-## How can I use Windows workers?
+### What add-ons are available?
 
-To enable Windows support for your EKS cluster, you should apply some configs manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).
+The available EKS add-ons can be [found here](https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html). You can also retrieve the available add-ons from the API using:
 
-Windows worker nodes requires additional cluster role (eks:kube-proxy-windows). If you are adding windows workers to existing cluster, you should apply config-map-aws-auth again.
+```sh
+aws eks describe-addon-versions --query 'addons[*].addonName'
+```
 
-#### Example configuration
+### What configuration values are available for an add-on?
 
-Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as coredns and the VPC resource controller.
+> [!NOTE]
+> The available configuration values will vary between add-on versions;
+> typically, more configuration values are added in later versions as functionality is enabled by EKS.
 
-1. Build AWS EKS cluster with the next workers configuration (default Linux):
+You can retrieve the configuration value schema for a given add-on using the following command:
 
+```sh
+aws eks describe-addon-configuration --addon-name <value> --addon-version <value> --query 'configurationSchema' --output text | jq
 ```
-worker_groups = [
-    {
-      name                          = "worker-group-linux"
-      instance_type                 = "m5.large"
-      platform                      = "linux"
-      asg_desired_capacity          = 2
-    },
-  ]
-```
-
-2. Apply commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use tab with name `Windows`)
 
-3. Add one more worker group for Windows with required field `platform = "windows"` and update your cluster. Worker group example:
+For example:
 
+```sh
+aws eks describe-addon-configuration --addon-name coredns --addon-version v1.11.1-eksbuild.8 --query 'configurationSchema' --output text | jq
 ```
-worker_groups = [
-    {
-      name                          = "worker-group-linux"
-      instance_type                 = "m5.large"
-      platform                      = "linux"
-      asg_desired_capacity          = 2
+
+Returns (at the time of writing):
+
+```json
+{
+  "$ref": "#/definitions/Coredns",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "definitions": {
+    "Coredns": {
+      "additionalProperties": false,
+      "properties": {
+        "affinity": {
+          "default": {
+            "affinity": {
+              "nodeAffinity": {
+                "requiredDuringSchedulingIgnoredDuringExecution": {
+                  "nodeSelectorTerms": [
+                    {
+                      "matchExpressions": [
+                        {
+                          "key": "kubernetes.io/os",
+                          "operator": "In",
+                          "values": [
+                            "linux"
+                          ]
+                        },
+                        {
+                          "key": "kubernetes.io/arch",
+                          "operator": "In",
+                          "values": [
+                            "amd64",
+                            "arm64"
+                          ]
+                        }
+                      ]
+                    }
+                  ]
+                }
+              },
+              "podAntiAffinity": {
+                "preferredDuringSchedulingIgnoredDuringExecution": [
+                  {
+                    "podAffinityTerm": {
+                      "labelSelector": {
+                        "matchExpressions": [
+                          {
+                            "key": "k8s-app",
+                            "operator": "In",
+                            "values": [
+                              "kube-dns"
+                            ]
+                          }
+                        ]
+                      },
+                      "topologyKey": "kubernetes.io/hostname"
+                    },
+                    "weight": 100
+                  }
+                ]
+              }
+            }
+          },
+          "description": "Affinity of the coredns pods",
+          "type": [
+            "object",
+            "null"
+          ]
+        },
+        "computeType": {
+          "type": "string"
+        },
+        "corefile": {
+          "description": "Entire corefile contents to use with installation",
+          "type": "string"
+        },
+        "nodeSelector": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "type": "object"
+        },
+        "podAnnotations": {
+          "properties": {},
+          "title": "The podAnnotations Schema",
+          "type": "object"
+        },
+        "podDisruptionBudget": {
+          "description": "podDisruptionBudget configurations",
+          "enabled": {
+            "default": true,
+            "description": "the option to enable managed PDB",
+            "type": "boolean"
+          },
+          "maxUnavailable": {
+            "anyOf": [
+              {
+                "pattern": ".*%$",
+                "type": "string"
+              },
+              {
+                "type": "integer"
+              }
+            ],
+            "default": 1,
+            "description": "minAvailable value for managed PDB, can be either string or integer; if it's string, should end with %"
+          },
+          "minAvailable": {
+            "anyOf": [
+              {
+                "pattern": ".*%$",
+                "type": "string"
+              },
+              {
+                "type": "integer"
+              }
+            ],
+            "description": "maxUnavailable value for managed PDB, can be either string or integer; if it's string, should end with %"
+          },
+          "type": "object"
+        },
+        "podLabels": {
+          "properties": {},
+          "title": "The podLabels Schema",
+          "type": "object"
+        },
+        "replicaCount": {
+          "type": "integer"
+        },
+        "resources": {
+          "$ref": "#/definitions/Resources"
+        },
+        "tolerations": {
+          "default": [
+            {
+              "key": "CriticalAddonsOnly",
+              "operator": "Exists"
+            },
+            {
+              "effect": "NoSchedule",
+              "key": "node-role.kubernetes.io/control-plane"
+            }
+          ],
+          "description": "Tolerations of the coredns pod",
+          "items": {
+            "type": "object"
+          },
+          "type": "array"
+        },
+        "topologySpreadConstraints": {
+          "description": "The coredns pod topology spread constraints",
+          "type": "array"
+        }
+      },
+      "title": "Coredns",
+      "type": "object"
     },
-    {
-      name                          = "worker-group-windows"
-      instance_type                 = "m5.large"
-      platform                      = "windows"
-      asg_desired_capacity          = 1
+    "Limits": {
+      "additionalProperties": false,
+      "properties": {
+        "cpu": {
+          "type": "string"
+        },
+        "memory": {
+          "type": "string"
+        }
+      },
+      "title": "Limits",
+      "type": "object"
     },
-  ]
+    "Resources": {
+      "additionalProperties": false,
+      "properties": {
+        "limits": {
+          "$ref": "#/definitions/Limits"
+        },
+        "requests": {
+          "$ref": "#/definitions/Limits"
+        }
+      },
+      "title": "Resources",
+      "type": "object"
+    }
+  }
+}
 ```
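+
+A rough sketch of then supplying these values through the module is shown below. This assumes the module's add-on input (shown here as `addons`; older module versions use `cluster_addons`) and passes `configuration_values` as a JSON-encoded string:
+
+```hcl
+  addons = {
+    coredns = {
+      configuration_values = jsonencode({
+        # Keys follow the schema returned above
+        replicaCount = 4
+        resources = {
+          limits = {
+            cpu    = "100m"
+            memory = "150Mi"
+          }
+        }
+      })
+    }
+  }
+```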
-
-4. With `kubectl get nodes` you can see cluster with mixed (Linux/Windows) nodes support.
-
-## Worker nodes with labels do not join a 1.16+ cluster
-
-Kubelet restricts the allowed list of labels in the `kubernetes.io` namespace that can be applied to nodes starting in 1.16.
-
-Older configurations used labels like `kubernetes.io/lifecycle=spot` and this is no longer allowed. Use `node.kubernetes.io/lifecycle=spot` instead.
-
-Reference the `--node-labels` argument for your version of Kubenetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
-
-## What is the difference between `node_groups` and `worker_groups`?
-
-`node_groups` are [AWS-managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) (configures "Node Groups" that you can find on the EKS dashboard). This system is supposed to ease some of the lifecycle around upgrading nodes. Although they do not do this automatically and you still need to manually trigger the updates.
-
-`worker_groups` are [self-managed nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) (provisions a typical "Autoscaling group" on EC2). It gives you full control over nodes in the cluster like using custom AMI for the nodes. As AWS says, "with worker groups the customer controls the data plane & AWS controls the control plane".
-
-Both can be used together in the same cluster.
-
-## I'm using both AWS-Managed node groups and Self-Managed worker groups and pods scheduled on a AWS Managed node groups are unable resolve DNS (even communication between pods)
-
-This happen because Core DNS can be scheduled on Self-Managed worker groups and by default, the terraform module doesn't create security group rules to ensure communication between pods schedulled on Self-Managed worker group and AWS-Managed node groups.
-
-You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create required rules.
diff --git a/docs/iam-permissions.md b/docs/iam-permissions.md
deleted file mode 100644
index b5810d9435..0000000000
--- a/docs/iam-permissions.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# IAM Permissions
-
-Following IAM permissions are the minimum permissions needed for your IAM user or IAM role to create an EKS cluster.
-
-```json
-{
-    "Version": "2012-10-17",
-    "Statement": [
-        {
-            "Sid": "VisualEditor0",
-            "Effect": "Allow",
-            "Action": [
-                "autoscaling:AttachInstances",
-                "autoscaling:CreateAutoScalingGroup",
-                "autoscaling:CreateLaunchConfiguration",
-                "autoscaling:CreateOrUpdateTags",
-                "autoscaling:DeleteAutoScalingGroup",
-                "autoscaling:DeleteLaunchConfiguration",
-                "autoscaling:DeleteTags",
-                "autoscaling:Describe*",
-                "autoscaling:DetachInstances",
-                "autoscaling:SetDesiredCapacity",
-                "autoscaling:UpdateAutoScalingGroup",
-                "autoscaling:SuspendProcesses",
-                "ec2:AllocateAddress",
-                "ec2:AssignPrivateIpAddresses",
-                "ec2:Associate*",
-                "ec2:AttachInternetGateway",
-                "ec2:AttachNetworkInterface",
-                "ec2:AuthorizeSecurityGroupEgress",
-                "ec2:AuthorizeSecurityGroupIngress",
-                "ec2:CreateDefaultSubnet",
-                "ec2:CreateDhcpOptions",
-                "ec2:CreateEgressOnlyInternetGateway",
-                "ec2:CreateInternetGateway",
-                "ec2:CreateNatGateway",
-                "ec2:CreateNetworkInterface",
-                "ec2:CreateRoute",
-                "ec2:CreateRouteTable",
-                "ec2:CreateSecurityGroup",
-                "ec2:CreateSubnet",
-                "ec2:CreateTags",
-                "ec2:CreateVolume",
-                "ec2:CreateVpc",
-                "ec2:CreateVpcEndpoint",
-                "ec2:DeleteDhcpOptions",
-                "ec2:DeleteEgressOnlyInternetGateway",
-                "ec2:DeleteInternetGateway",
-                "ec2:DeleteNatGateway",
-                "ec2:DeleteNetworkInterface",
-                "ec2:DeleteRoute",
-                "ec2:DeleteRouteTable",
-                "ec2:DeleteSecurityGroup",
-                "ec2:DeleteSubnet",
-                "ec2:DeleteTags",
-                "ec2:DeleteVolume",
-                "ec2:DeleteVpc",
-                "ec2:DeleteVpnGateway",
-                "ec2:Describe*",
-                "ec2:DetachInternetGateway",
-                "ec2:DetachNetworkInterface",
-                "ec2:DetachVolume",
-                "ec2:Disassociate*",
-                "ec2:ModifySubnetAttribute",
-                "ec2:ModifyVpcAttribute",
-                "ec2:ModifyVpcEndpoint",
-                "ec2:ReleaseAddress",
-                "ec2:RevokeSecurityGroupEgress",
-                "ec2:RevokeSecurityGroupIngress",
-                "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
-                "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
-                "ec2:CreateLaunchTemplate",
-                "ec2:CreateLaunchTemplateVersion",
-                "ec2:DeleteLaunchTemplate",
-                "ec2:DeleteLaunchTemplateVersions",
-                "ec2:DescribeLaunchTemplates",
-                "ec2:DescribeLaunchTemplateVersions",
-                "ec2:GetLaunchTemplateData",
-                "ec2:ModifyLaunchTemplate",
-                "ec2:RunInstances",
-                "eks:CreateCluster",
-                "eks:DeleteCluster",
-                "eks:DescribeCluster",
-                "eks:ListClusters",
-                "eks:UpdateClusterConfig",
-                "eks:UpdateClusterVersion",
-                "eks:DescribeUpdate",
-                "eks:TagResource",
-                "eks:UntagResource",
-                "eks:ListTagsForResource",
-                "eks:CreateFargateProfile",
-                "eks:DeleteFargateProfile",
-                "eks:DescribeFargateProfile",
-                "eks:ListFargateProfiles",
-                "eks:CreateNodegroup",
-                "eks:DeleteNodegroup",
-                "eks:DescribeNodegroup",
-                "eks:ListNodegroups",
-                "eks:UpdateNodegroupConfig",
-                "eks:UpdateNodegroupVersion",
-                "iam:AddRoleToInstanceProfile",
-                "iam:AttachRolePolicy",
-                "iam:CreateInstanceProfile",
-                "iam:CreateOpenIDConnectProvider",
-                "iam:CreateServiceLinkedRole",
-                "iam:CreatePolicy",
-                "iam:CreatePolicyVersion",
-                "iam:CreateRole",
-                "iam:DeleteInstanceProfile",
-                "iam:DeleteOpenIDConnectProvider",
-                "iam:DeletePolicy",
-                "iam:DeletePolicyVersion"
-                "iam:DeleteRole",
-                "iam:DeleteRolePolicy",
-                "iam:DeleteServiceLinkedRole",
-                "iam:DetachRolePolicy",
-                "iam:GetInstanceProfile",
-                "iam:GetOpenIDConnectProvider",
-                "iam:GetPolicy",
-                "iam:GetPolicyVersion",
-                "iam:GetRole",
-                "iam:GetRolePolicy",
-                "iam:List*",
-                "iam:PassRole",
-                "iam:PutRolePolicy",
-                "iam:RemoveRoleFromInstanceProfile",
-                "iam:TagOpenIDConnectProvider",
-                "iam:TagRole",
-                "iam:UntagRole",
-                "iam:UpdateAssumeRolePolicy",
-                // Following permissions are needed if cluster_enabled_log_types is enabled
-                "logs:CreateLogGroup",
-                "logs:DescribeLogGroups",
-                "logs:DeleteLogGroup",
-                "logs:ListTagsLogGroup",
-                "logs:PutRetentionPolicy",
-                // Following permissions for working with secrets_encryption example
-                "kms:CreateAlias",
-                "kms:CreateGrant",
-                "kms:CreateKey",
-                "kms:DeleteAlias",
-                "kms:DescribeKey",
-                "kms:GetKeyPolicy",
-                "kms:GetKeyRotationStatus",
-                "kms:ListAliases",
-                "kms:ListResourceTags",
-                "kms:ScheduleKeyDeletion"
-            ],
-            "Resource": "*"
-        }
-    ]
-}
-```
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000000..31af4c6767
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,3 @@
+# Terraform AWS EKS module
+
+More content coming soon!
diff --git a/docs/local.md b/docs/local.md
new file mode 100644
index 0000000000..b50caf71a3
--- /dev/null
+++ b/docs/local.md
@@ -0,0 +1,20 @@
+# Local Development
+
+## Documentation Site
+
+To run the documentation site locally, you will need the following installed:
+
+- [Python 3.x](https://www.python.org/downloads/)
+- [mkdocs](https://www.mkdocs.org/user-guide/installation/)
+- The following pip packages for mkdocs (installed via `pip install ...`)
+    - `mkdocs-material`
+    - `mkdocs-include-markdown-plugin`
+    - `mkdocs-awesome-pages-plugin`
+
+To run the documentation site locally, run the following command from the root of the repository:
+
+```bash
+mkdocs serve
+```
+
+Open the documentation at the link shown in the terminal output (e.g. [http://127.0.0.1:8000/terraform-aws-eks/](http://127.0.0.1:8000/terraform-aws-eks/)).
diff --git a/docs/network_connectivity.md b/docs/network_connectivity.md
new file mode 100644
index 0000000000..2cb38d5e3e
--- /dev/null
+++ b/docs/network_connectivity.md
@@ -0,0 +1,67 @@
+# Network Connectivity
+
+## Cluster Endpoint
+
+### Public Endpoint w/ Restricted CIDRs
+
+When restricting the cluster's public endpoint to only the CIDRs specified by users, it is recommended that you also enable the private endpoint, or ensure that the CIDR blocks you specify include the addresses that nodes and Fargate pods (if you use them) use to access the public endpoint.
+
+Please refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for further information.
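+
+For example, a minimal sketch (the CIDR shown is illustrative; the variable names follow the cluster endpoint inputs referenced in the FAQ):
+
+```hcl
+  cluster_endpoint_public_access       = true
+  cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"] # replace with the CIDRs that need API access
+  cluster_endpoint_private_access      = true               # nodes and Fargate pods reach the API privately
+```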
+
+## Security Groups
+
+- Cluster Security Group
+  - This module by default creates a cluster security group ("additional" security group when viewed from the console) in addition to the default security group created by the AWS EKS service. This "additional" security group allows users to customize inbound and outbound rules via the module as they see fit
+    - The default inbound/outbound rules provided by the module are derived from the [AWS minimum recommendations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in addition to NTP and HTTPS public internet egress rules (without these rules, the traffic shows up in VPC flow logs as rejects - the rules are used for clock sync and downloading necessary packages/updates)
+    - The minimum inbound/outbound rules are provided for cluster and node creation to succeed without errors, but users will most likely need to add the necessary port and protocol for node-to-node communication (this is user specific based on how nodes are configured to communicate across the cluster)
+    - Users have the ability to opt out of the security group creation and instead provide their own externally created security group if so desired
+    - The security group that is created is designed to handle the bare minimum communication necessary between the control plane and the nodes, as well as any external egress to allow the cluster to successfully launch without error
+  - Users also have the option to supply additional, externally created security groups to the cluster via the `cluster_additional_security_group_ids` variable
+  - Lastly, users are able to opt in to attaching the primary security group automatically created by the EKS service by setting `attach_cluster_primary_security_group = true` from the root module for the respective node group (or setting it within the node group defaults). This security group is not managed by the module; it is created by the EKS service. It permits all traffic within the domain of the security group as well as all egress traffic to the internet.
+
+- Node Group Security Group(s)
+  - Users have the option to assign their own externally created security group(s) to the node group via the `vpc_security_group_ids` variable (see the sketch following this list)
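+
+A brief sketch of supplying externally created security groups (the `aws_security_group.custom` resource shown is a hypothetical example):
+
+```hcl
+  # Attach an externally created security group to the cluster (control plane ENIs)
+  cluster_additional_security_group_ids = [aws_security_group.custom.id]
+
+  eks_managed_node_groups = {
+    example = {
+      # Attach an externally created security group to the nodes
+      vpc_security_group_ids = [aws_security_group.custom.id]
+    }
+  }
+```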
+
+See the example snippet below which adds additional security group rules to the cluster security group as well as the shared node security group (for node-to-node access). Users can use this extensibility to open up network access as they see fit using the security groups provided by the module:
+
+```hcl
+  ...
+  # Extend cluster security group rules
+  security_group_additional_rules = {
+    egress_nodes_ephemeral_ports_tcp = {
+      description                = "To node 1025-65535"
+      protocol                   = "tcp"
+      from_port                  = 1025
+      to_port                    = 65535
+      type                       = "egress"
+      source_node_security_group = true
+    }
+  }
+
+  # Extend node-to-node security group rules
+  node_security_group_additional_rules = {
+    ingress_self_all = {
+      description = "Node to node all ports/protocols"
+      protocol    = "-1"
+      from_port   = 0
+      to_port     = 0
+      type        = "ingress"
+      self        = true
+    }
+    egress_all = {
+      description      = "Node all egress"
+      protocol         = "-1"
+      from_port        = 0
+      to_port          = 0
+      type             = "egress"
+      cidr_blocks      = ["0.0.0.0/0"]
+      ipv6_cidr_blocks = ["::/0"]
+    }
+  }
+  ...
+```
+
+The security groups created by this module are depicted in the image shown below along with their default inbound/outbound rules:
+
+<p align="center">
+  <img src="https://raw.githubusercontent.com/terraform-aws-modules/terraform-aws-eks/master/.github/images/security_groups.svg" alt="Security Groups" width="100%">
+</p>
diff --git a/docs/spot-instances.md b/docs/spot-instances.md
deleted file mode 100644
index f140fe55ea..0000000000
--- a/docs/spot-instances.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# Using spot instances
-
-Spot instances usually cost around 30-70% less than an on-demand instance. So using them for your EKS workloads can save a lot of money but requires some special considerations as they could be terminated with only 2 minutes warning.
-
-You need to install a daemonset to catch the 2 minute warning before termination. This will ensure the node is gracefully drained before termination. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this. There's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):
-
-```
-helm install stable/k8s-spot-termination-handler --namespace kube-system
-```
-
-In the following examples at least 1 worker group that uses on-demand instances is included. This worker group has an added node label that can be used in scheduling. This could be used to schedule any workload not suitable for spot instances but is important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) as it might be end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):
-
-```yaml
-nodeSelector:
-  kubernetes.io/lifecycle: normal
-```
-
-Notes:
-
-- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are the cheaper.
-- It's best to have a broad range of instance types to ensure there's always some instances to run when prices fluctuate.
-- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
-- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.
-
-## Using Launch Configuration
-
-Example worker group configuration that uses an ASG with launch configuration for each worker group:
-
-```hcl
-  worker_groups = [
-    {
-      name                = "on-demand-1"
-      instance_type       = "m4.xlarge"
-      asg_max_size        = 1
-      kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=normal"
-      suspended_processes = ["AZRebalance"]
-    },
-    {
-      name                = "spot-1"
-      spot_price          = "0.199"
-      instance_type       = "c4.xlarge"
-      asg_max_size        = 20
-      kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=spot"
-      suspended_processes = ["AZRebalance"]
-    },
-    {
-      name                = "spot-2"
-      spot_price          = "0.20"
-      instance_type       = "m4.xlarge"
-      asg_max_size        = 20
-      kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=spot"
-      suspended_processes = ["AZRebalance"]
-    }
-  ]
-```
-
-## Using Launch Templates
-
-Launch Template support is a recent addition to both AWS and this module. It might not be as tried and tested but it's more suitable for spot instances as it allowed multiple instance types in the same worker group:
-
-```hcl
-  worker_groups = [
-    {
-      name                = "on-demand-1"
-      instance_type       = "m4.xlarge"
-      asg_max_size        = 10
-      kubelet_extra_args  = "--node-labels=spot=false"
-      suspended_processes = ["AZRebalance"]
-    }
-  ]
-
-
-  worker_groups_launch_template = [
-    {
-      name                    = "spot-1"
-      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
-      spot_instance_pools     = 4
-      asg_max_size            = 5
-      asg_desired_capacity    = 5
-      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
-      public_ip               = true
-    },
-  ]
-```
-
-## Using Launch Templates With Both Spot and On Demand
-
-Example launch template to launch 2 on demand instances of type m5.large, and have the ability to scale up using spot instances and on demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 meta-data service: either "on-demand" or "spot".
-
-`on_demand_percentage_above_base_capacity` is set to 25 so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).
-
-```hcl
-  worker_groups_launch_template = [{
-    name                    = "mixed-demand-spot"
-    override_instance_types = ["m5.large", "m5a.large", "m4.large"]
-    root_encrypted          = true
-    root_volume_size        = 50
-
-    asg_min_size                             = 2
-    asg_desired_capacity                     = 2
-    on_demand_base_capacity                  = 3
-    on_demand_percentage_above_base_capacity = 25
-    asg_max_size                             = 20
-    spot_instance_pools                      = 3
-
-    kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
-  }]
-```
-
-## Important Notes
-
-An issue with the cluster-autoscaler: https://github.com/kubernetes/autoscaler/issues/1133
-
-AWS have released their own termination handler now: https://github.com/aws/aws-node-termination-handler
diff --git a/docs/user_data.md b/docs/user_data.md
new file mode 100644
index 0000000000..2bd17f69a9
--- /dev/null
+++ b/docs/user_data.md
@@ -0,0 +1,95 @@
+# User Data & Bootstrapping
+
+Users can see the various methods of using and providing user data through the [user data tests](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/tests/user-data), as well as more detailed information on the design and possible configurations via the [user data module itself](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data).
+
+## Summary
+
+- AWS EKS Managed Node Groups
+  - By default, any supplied user data is pre-pended to the user data supplied by the EKS Managed Node Group service
+  - If users supply an `ami_id`, the service no longer supplies user data to bootstrap nodes; users can set `enable_bootstrap_user_data = true` to use the module provided user data template, or provide their own user data template
+  - For `BOTTLEROCKET_*` AMI types, user data must be in TOML format
+  - For `WINDOWS_*` AMI types, user data must be in powershell/PS1 script format
+- Self Managed Node Groups
+  - `AL2_*` AMI types -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template
+  - `AL2023_*` AMI types -> the user data template (MIME multipart format) provided by the module is used as the default; users are able to provide their own user data template
+  - `BOTTLEROCKET_*` AMI types -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template
+  - `WINDOWS_*` AMI types -> the user data template (powershell/PS1 script) provided by the module is used as the default; users are able to provide their own user data template
+
+The templates provided by the module can be found under the [templates directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/templates)
+
+## EKS Managed Node Group
+
+When using an EKS managed node group, users have 2 primary routes for interacting with the bootstrap user data:
+
+1. If a value for `ami_id` is not provided, users can supply additional user data that is pre-pended before the EKS Managed Node Group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)
+
+   - Users can use the following variables to facilitate this process:
+
+    For `AL2_*`, `BOTTLEROCKET_*`, and `WINDOWS_*`:
+    ```hcl
+    pre_bootstrap_user_data = "..."
+    ```
+
+    For `AL2023_*`
+    ```hcl
+    cloudinit_pre_nodeadm = [{
+      content      = <<-EOT
+        ---
+        apiVersion: node.eks.aws/v1alpha1
+        kind: NodeConfig
+        spec:
+          ...
+      EOT
+      content_type = "application/node.eks.aws"
+    }]
+    ```
+
+2. If a custom AMI is used, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary user data to bootstrap and register nodes with the cluster when launched. There are two routes to facilitate this bootstrapping process:
+   - If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched:
+     - Users can use the following variables to facilitate this process:
+       ```hcl
+       enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+       pre_bootstrap_user_data    = "..."
+       bootstrap_extra_args       = "..."
+       post_bootstrap_user_data   = "..."
+       ```
+   - If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective AMI type are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+     - Users can use the following variables to facilitate this process:
+       ```hcl
+       user_data_template_path  = "./your/user_data.sh" # user supplied bootstrap user data template
+       pre_bootstrap_user_data  = "..."
+       bootstrap_extra_args     = "..."
+       post_bootstrap_user_data = "..."
+       ```
+
+| ℹ️ When using bottlerocket, the supplied user data (TOML format) is merged in with the values supplied by EKS. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the `AL2_*` AMI types, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+
+## Self Managed Node Group
+
+Self managed node groups require users to provide the necessary bootstrap user data. Users can elect to use the user data template provided by the module for their respective AMI type or provide their own user data template for rendering by the module.
+
+- If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched:
+  - Users can use the following variables to facilitate this process:
+    ```hcl
+    enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
+    pre_bootstrap_user_data    = "..."
+    bootstrap_extra_args       = "..."
+    post_bootstrap_user_data   = "..."
+    ```
+- If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective AMI type are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
+    - Users can use the following variables to facilitate this process:
+      ```hcl
+      user_data_template_path  = "./your/user_data.sh" # user supplied bootstrap user data template
+      pre_bootstrap_user_data  = "..."
+      bootstrap_extra_args     = "..."
+      post_bootstrap_user_data = "..."
+      ```
+
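+For additional context, these variables are set on the node group definition itself. A minimal, hypothetical sketch of a self managed node group that opts in to the module supplied template might look like the following (the group key, instance type, and `data.aws_ami.eks_default` data source are placeholders, not part of the module):
+
+```hcl
+self_managed_node_groups = {
+  example = {
+    instance_type = "m6i.large"
+
+    # AMI is an EKS Optimized AMI derivative, so opt in to the module supplied template
+    ami_id                     = data.aws_ami.eks_default.id
+    enable_bootstrap_user_data = true
+
+    pre_bootstrap_user_data = <<-EOT
+      echo "running before the EKS bootstrap script"
+    EOT
+
+    # passed through to the AWS EKS Optimized AMI bootstrap script
+    bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=normal'"
+
+    post_bootstrap_user_data = <<-EOT
+      echo "node has been bootstrapped"
+    EOT
+  }
+}
+```
+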
+### Logic Diagram
+
+The following diagram illustrates the rough flow of logic encapsulated within the `_user_data` module and highlights the various ways in which user data can be populated.
+
+<p align="center">
+  <img src="https://raw.githubusercontent.com/terraform-aws-modules/terraform-aws-eks/master/.github/images/user_data.svg" alt="User Data" width="60%">
+</p>
diff --git a/examples/README.md b/examples/README.md
index 7ee5bebfb9..8d54b3db38 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,11 +1,5 @@
 # Examples
 
-These serve a few purposes:
+The examples demonstrate different cluster configurations that users can create with the modules provided.
 
-1.  Shows developers how to use the module in a straightforward way as integrated with other terraform community supported modules.
-2.  Serves as the test infrastructure for CI on the project.
-3.  Provides a simple way to play with the Kubernetes cluster you create.
-
-## IAM Permissions
-
-You can see the minimum IAM Permissions required [here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md).
+Please do not mistake the examples provided for "best practices". It is up to users to consult the AWS service documentation for best practices, usage recommendations, etc.
diff --git a/examples/basic/main.tf b/examples/basic/main.tf
deleted file mode 100644
index c6257a25eb..0000000000
--- a/examples/basic/main.tf
+++ /dev/null
@@ -1,138 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-resource "aws_security_group" "worker_group_mgmt_one" {
-  name_prefix = "worker_group_mgmt_one"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-
-    cidr_blocks = [
-      "10.0.0.0/8",
-    ]
-  }
-}
-
-resource "aws_security_group" "worker_group_mgmt_two" {
-  name_prefix = "worker_group_mgmt_two"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-
-    cidr_blocks = [
-      "192.168.0.0/16",
-    ]
-  }
-}
-
-resource "aws_security_group" "all_worker_mgmt" {
-  name_prefix = "all_worker_management"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-
-    cidr_blocks = [
-      "10.0.0.0/8",
-      "172.16.0.0/12",
-      "192.168.0.0/16",
-    ]
-  }
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc"
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = "1"
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/internal-elb"             = "1"
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
-
-  tags = {
-    Environment = "test"
-    GithubRepo  = "terraform-aws-eks"
-    GithubOrg   = "terraform-aws-modules"
-  }
-
-  vpc_id = module.vpc.vpc_id
-
-  worker_groups = [
-    {
-      name                          = "worker-group-1"
-      instance_type                 = "t3.small"
-      additional_userdata           = "echo foo bar"
-      asg_desired_capacity          = 2
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
-    },
-    {
-      name                          = "worker-group-2"
-      instance_type                 = "t3.medium"
-      additional_userdata           = "echo foo bar"
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
-      asg_desired_capacity          = 1
-    },
-  ]
-
-  worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
-  map_roles                            = var.map_roles
-  map_users                            = var.map_users
-  map_accounts                         = var.map_accounts
-}
diff --git a/examples/basic/outputs.tf b/examples/basic/outputs.tf
deleted file mode 100644
index a0788aff1d..0000000000
--- a/examples/basic/outputs.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
diff --git a/examples/basic/variables.tf b/examples/basic/variables.tf
deleted file mode 100644
index 7085aeabd4..0000000000
--- a/examples/basic/variables.tf
+++ /dev/null
@@ -1,52 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap."
-  type        = list(string)
-
-  default = [
-    "777777777777",
-    "888888888888",
-  ]
-}
-
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      rolearn  = "arn:aws:iam::66666666666:role/role1"
-      username = "role1"
-      groups   = ["system:masters"]
-    },
-  ]
-}
-
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap."
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-}
diff --git a/examples/basic/versions.tf b/examples/basic/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/basic/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
deleted file mode 100644
index cc73eb7634..0000000000
--- a/examples/bottlerocket/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# AWS Bottlerocket based nodes
-
-This is a minimalistic example that shows how to use functionality of this module to deploy
-nodes based on [AWS Bottlerocket container OS](https://github.com/bottlerocket-os/bottlerocket)
-
-Example is minimalistic by purpose - it shows what knobs to turn to make Bottlerocket work.
-Do not use default VPC for your workloads deployment.
\ No newline at end of file
diff --git a/examples/bottlerocket/data.tf b/examples/bottlerocket/data.tf
deleted file mode 100644
index bf380b382c..0000000000
--- a/examples/bottlerocket/data.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-data "aws_ami" "bottlerocket_ami" {
-  most_recent = true
-  owners      = ["amazon"]
-  filter {
-    name   = "name"
-    values = ["bottlerocket-aws-k8s-${var.k8s_version}-x86_64-*"]
-  }
-}
-
-data "aws_region" "current" {}
-
-data "aws_vpc" "default" {
-  default = true
-}
-
-data "aws_subnet_ids" "default" {
-  vpc_id = data.aws_vpc.default.id
-}
-
-data "aws_iam_policy" "ssm" {
-  arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
-}
\ No newline at end of file
diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf
deleted file mode 100644
index 86166df0e5..0000000000
--- a/examples/bottlerocket/main.tf
+++ /dev/null
@@ -1,64 +0,0 @@
-terraform {
-  required_version = ">= 0.13.0"
-}
-
-resource "tls_private_key" "nodes" {
-  algorithm = "RSA"
-}
-
-resource "aws_key_pair" "nodes" {
-  key_name   = "bottlerocket-nodes"
-  public_key = tls_private_key.nodes.public_key_openssh
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = "bottlerocket"
-  cluster_version = var.k8s_version
-  subnets         = data.aws_subnet_ids.default.ids
-
-  vpc_id = data.aws_vpc.default.id
-
-  write_kubeconfig = false
-  manage_aws_auth  = false
-
-  worker_groups_launch_template = [
-    {
-      name = "bottlerocket-nodes"
-      # passing bottlerocket ami id
-      ami_id               = data.aws_ami.bottlerocket_ami.id
-      instance_type        = "t3a.small"
-      asg_desired_capacity = 2
-      key_name             = aws_key_pair.nodes.key_name
-
-      # Since we are using default VPC there is no NAT gateway so we need to
-      # attach public ip to nodes so they can reach k8s API server
-      # do not repeat this at home (i.e. production)
-      public_ip = true
-
-      # This section overrides default userdata template to pass bottlerocket
-      # specific user data
-      userdata_template_file = "${path.module}/userdata.toml"
-      # we are using this section to pass additional arguments for
-      # userdata template rendering
-      userdata_template_extra_args = {
-        enable_admin_container   = var.enable_admin_container
-        enable_control_container = var.enable_control_container
-        aws_region               = data.aws_region.current.name
-      }
-      # example of k8s/kubelet configuration via additional_userdata
-      additional_userdata = <<EOT
-[settings.kubernetes.node-labels]
-ingress = "allowed"
-EOT
-    }
-  ]
-}
-
-# SSM policy for bottlerocket control container access
-# https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#enabling-ssm
-resource "aws_iam_policy_attachment" "ssm" {
-  name       = "ssm"
-  roles      = [module.eks.worker_iam_role_name]
-  policy_arn = data.aws_iam_policy.ssm.arn
-}
diff --git a/examples/bottlerocket/userdata.toml b/examples/bottlerocket/userdata.toml
deleted file mode 100644
index 1dc9273f58..0000000000
--- a/examples/bottlerocket/userdata.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-# https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#description-of-settings
-[settings.kubernetes]
-api-server = "${endpoint}"
-cluster-certificate = "${cluster_auth_base64}"
-cluster-name = "${cluster_name}"
-${additional_userdata}
-
-# Hardening based on https://github.com/bottlerocket-os/bottlerocket/blob/develop/SECURITY_GUIDANCE.md
-
-# Enable kernel lockdown in "integrity" mode.
-# This prevents modifications to the running kernel, even by privileged users.
-[settings.kernel]
-lockdown = "integrity"
-
-# The admin host container provides SSH access and runs with "superpowers".
-# It is disabled by default, but can be disabled explicitly.
-[settings.host-containers.admin]
-enabled = ${enable_admin_container}
-
-# The control host container provides out-of-band access via SSM.
-# It is enabled by default, and can be disabled if you do not expect to use SSM.
-# This could leave you with no way to access the API and change settings on an existing node!
-[settings.host-containers.control]
-enabled = ${enable_control_container}
\ No newline at end of file
diff --git a/examples/bottlerocket/variables.tf b/examples/bottlerocket/variables.tf
deleted file mode 100644
index 6eebc3e444..0000000000
--- a/examples/bottlerocket/variables.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-variable "k8s_version" {
-  description = "k8s cluster version"
-  default     = "1.20"
-  type        = string
-}
-
-variable "enable_admin_container" {
-  description = "Enable/disable admin container"
-  default     = false
-  type        = bool
-}
-
-variable "enable_control_container" {
-  description = "Enable/disable control container"
-  default     = true
-  type        = bool
-}
\ No newline at end of file
diff --git a/examples/create_false/main.tf b/examples/create_false/main.tf
deleted file mode 100644
index 0afffcd2b4..0000000000
--- a/examples/create_false/main.tf
+++ /dev/null
@@ -1,30 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  count = 0
-  name  = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  count = 0
-  name  = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = element(concat(data.aws_eks_cluster.cluster[*].endpoint, [""]), 0)
-  cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority.0.data, [""]), 0))
-  token                  = element(concat(data.aws_eks_cluster_auth.cluster[*].token, [""]), 0)
-  load_config_file       = false
-}
-
-module "eks" {
-  source          = "../.."
-  create_eks      = false
-  cluster_version = ""
-
-  vpc_id       = ""
-  cluster_name = ""
-  subnets      = []
-}
diff --git a/examples/create_false/variables.tf b/examples/create_false/variables.tf
deleted file mode 100644
index 81b8dbe73e..0000000000
--- a/examples/create_false/variables.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
diff --git a/examples/create_false/versions.tf b/examples/create_false/versions.tf
deleted file mode 100644
index 9b73d00523..0000000000
--- a/examples/create_false/versions.tf
+++ /dev/null
@@ -1,8 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/eks-auto-mode/README.md b/examples/eks-auto-mode/README.md
new file mode 100644
index 0000000000..57da18f875
--- /dev/null
+++ b/examples/eks-auto-mode/README.md
@@ -0,0 +1,96 @@
+# EKS Auto Mode
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+terraform init
+terraform plan
+terraform apply --auto-approve
+```
+
+Once the cluster has finished provisioning, you can use the `kubectl` command to interact with the cluster. For example, to deploy the sample workload and watch EKS Auto Mode provision capacity for it, run:
+
+```bash
+aws eks update-kubeconfig --name $(terraform output -raw cluster_name)
+kubectl apply -f deployment.yaml
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_disabled_eks"></a> [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_access_entries"></a> [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes |
+| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| <a name="output_cluster_dualstack_oidc_issuer_url"></a> [cluster\_dualstack\_oidc\_issuer\_url](#output\_cluster\_dualstack\_oidc\_issuer\_url) | Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | Cluster IAM role ARN |
+| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | Cluster IAM role name |
+| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
+| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| <a name="output_cluster_ip_family"></a> [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) |
+| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
+| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| <a name="output_cluster_service_cidr"></a> [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from |
+| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
+| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
+| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
+| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
+| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | EKS Auto node IAM role ARN |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | EKS Auto node IAM role name |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
+| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
+<!-- END_TF_DOCS -->
diff --git a/examples/eks-auto-mode/deployment.yaml b/examples/eks-auto-mode/deployment.yaml
new file mode 100644
index 0000000000..a49fc35f4c
--- /dev/null
+++ b/examples/eks-auto-mode/deployment.yaml
@@ -0,0 +1,21 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.10
+          resources:
+            requests:
+              cpu: 1
diff --git a/examples/eks-auto-mode/main.tf b/examples/eks-auto-mode/main.tf
new file mode 100644
index 0000000000..c99c60c970
--- /dev/null
+++ b/examples/eks-auto-mode/main.tf
@@ -0,0 +1,86 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name               = "ex-${basename(path.cwd)}"
+  kubernetes_version = "1.33"
+  region             = "us-west-2"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name                   = local.name
+  kubernetes_version     = local.kubernetes_version
+  endpoint_public_access = true
+
+  enable_cluster_creator_admin_permissions = true
+
+  compute_config = {
+    enabled    = true
+    node_pools = ["general-purpose"]
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  tags = local.tags
+}
+
+module "disabled_eks" {
+  source = "../.."
+
+  create = false
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
diff --git a/examples/eks-auto-mode/outputs.tf b/examples/eks-auto-mode/outputs.tf
new file mode 100644
index 0000000000..9ed8c27220
--- /dev/null
+++ b/examples/eks-auto-mode/outputs.tf
@@ -0,0 +1,245 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = module.eks.cluster_id
+}
+
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = module.eks.cluster_name
+}
+
+output "cluster_oidc_issuer_url" {
+  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_dualstack_oidc_issuer_url" {
+  description = "Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_dualstack_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+  description = "Platform version for the cluster"
+  value       = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+  value       = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+  value       = module.eks.cluster_primary_security_group_id
+}
+
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = module.eks.cluster_service_cidr
+}
+
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = module.eks.cluster_ip_family
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = module.eks.access_entries
+}
+
+################################################################################
+# KMS Key
+################################################################################
+
+output "kms_key_arn" {
+  description = "The Amazon Resource Name (ARN) of the key"
+  value       = module.eks.kms_key_arn
+}
+
+output "kms_key_id" {
+  description = "The globally unique identifier for the key"
+  value       = module.eks.kms_key_id
+}
+
+output "kms_key_policy" {
+  description = "The IAM resource policy set on the key"
+  value       = module.eks.kms_key_policy
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the cluster security group"
+  value       = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+  description = "ID of the cluster security group"
+  value       = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the node shared security group"
+  value       = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+  description = "ID of the node shared security group"
+  value       = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider" {
+  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
+  value       = module.eks.oidc_provider
+}
+
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+  value       = module.eks.oidc_provider_arn
+}
+
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = module.eks.cluster_tls_certificate_sha1_fingerprint
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+  description = "Cluster IAM role name"
+  value       = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+  description = "Cluster IAM role ARN"
+  value       = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "EKS Auto node IAM role name"
+  value       = module.eks.node_iam_role_name
+}
+
+output "node_iam_role_arn" {
+  description = "EKS Auto node IAM role ARN"
+  value       = module.eks.node_iam_role_arn
+}
+
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.node_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+  description = "Map of attribute maps for all EKS cluster addons enabled"
+  value       = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+  description = "Map of attribute maps for all EKS identity providers enabled"
+  value       = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+  description = "Name of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+  description = "Arn of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+  description = "Map of attribute maps for all EKS Fargate Profiles created"
+  value       = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+  description = "Map of attribute maps for all EKS managed node groups created"
+  value       = module.eks.eks_managed_node_groups
+}
+
+output "eks_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by EKS managed node groups"
+  value       = module.eks.eks_managed_node_groups_autoscaling_group_names
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+  description = "Map of attribute maps for all self managed node groups created"
+  value       = module.eks.self_managed_node_groups
+}
+
+output "self_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by self-managed node groups"
+  value       = module.eks.self_managed_node_groups_autoscaling_group_names
+}
diff --git a/examples/eks-auto-mode/variables.tf b/examples/eks-auto-mode/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/eks-auto-mode/versions.tf b/examples/eks-auto-mode/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/examples/eks-auto-mode/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/examples/eks-hybrid-nodes/.gitignore b/examples/eks-hybrid-nodes/.gitignore
new file mode 100644
index 0000000000..3a7d063a21
--- /dev/null
+++ b/examples/eks-hybrid-nodes/.gitignore
@@ -0,0 +1,2 @@
+*.pem
+*.sh
diff --git a/examples/eks-hybrid-nodes/README.md b/examples/eks-hybrid-nodes/README.md
new file mode 100644
index 0000000000..94ccb4eb4d
--- /dev/null
+++ b/examples/eks-hybrid-nodes/README.md
@@ -0,0 +1,85 @@
+# EKS Hybrid Nodes
+
+> [!CAUTION]
+> EC2 instances are not supported with EKS Hybrid Nodes. This example uses one for reference purposes only, so that users do not have to provision a VM in their own environment.
+
+## Usage
+
+> [!NOTE]
+> The [Packer CLI](https://developer.hashicorp.com/packer/tutorials/docker-get-started/get-started-install-cli) is required to build a custom AMI for the Hybrid node used in the example.
+
+To provision the provided configurations you need to execute:
+
+```bash
+terraform init
+terraform apply -target=module.remote_node_vpc -target=local_file.key_pem -target=module.key_pair --auto-approve
+cd ami && packer build -var 'ssh_keypair_name=hybrid-node' -var 'ssh_private_key_file=../key.pem' . && cd -
+terraform apply --auto-approve
+./join.sh
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 3.0 |
+| <a name="requirement_http"></a> [http](#requirement\_http) | >= 3.4 |
+| <a name="requirement_local"></a> [local](#requirement\_local) | >= 2.5 |
+| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 4.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+| <a name="provider_aws.remote"></a> [aws.remote](#provider\_aws.remote) | >= 6.0 |
+| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 3.0 |
+| <a name="provider_http"></a> [http](#provider\_http) | >= 3.4 |
+| <a name="provider_local"></a> [local](#provider\_local) | >= 2.5 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_eks_hybrid_node_role"></a> [eks\_hybrid\_node\_role](#module\_eks\_hybrid\_node\_role) | ../../modules/hybrid-node-role | n/a |
+| <a name="module_key_pair"></a> [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 |
+| <a name="module_remote_node_vpc"></a> [remote\_node\_vpc](#module\_remote\_node\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_instance.hybrid_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource |
+| [aws_route.peer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource |
+| [aws_route.remote_node_private](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource |
+| [aws_route.remote_node_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource |
+| [aws_security_group.remote_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_ssm_activation.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_activation) | resource |
+| [aws_vpc_peering_connection.remote_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_peering_connection) | resource |
+| [aws_vpc_peering_connection_accepter.peer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_peering_connection_accepter) | resource |
+| [aws_vpc_security_group_egress_rule.remote_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource |
+| [aws_vpc_security_group_ingress_rule.remote_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource |
+| [helm_release.cilium](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [local_file.join](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.key_pub_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [aws_ami.hybrid_node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_availability_zones.remote](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [http_http.icanhazip](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+No outputs.
+<!-- END_TF_DOCS -->
diff --git a/examples/eks-hybrid-nodes/ami/amazon-eks-ubuntu.pkr.hcl b/examples/eks-hybrid-nodes/ami/amazon-eks-ubuntu.pkr.hcl
new file mode 100644
index 0000000000..02c54ea647
--- /dev/null
+++ b/examples/eks-hybrid-nodes/ami/amazon-eks-ubuntu.pkr.hcl
@@ -0,0 +1,320 @@
+locals {
+  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
+
+  ami_name = "${var.ami_name_prefix}-${var.eks_version}-amd64-${local.timestamp}"
+
+  tags = {
+    SourceAMI    = "{{ .SourceAMI }}"
+    Name         = local.ami_name
+    Architecture = "amd64"
+  }
+}
+
+data "amazon-parameterstore" "this" {
+  name = "/aws/service/canonical/ubuntu/server-minimal/22.04/stable/current/amd64/hvm/ebs-gp2/ami-id"
+  region = var.region
+}
+
+################################################################################
+# EBS Source
+################################################################################
+
+source "amazon-ebs" "this" {
+
+  # AMI Configuration
+  dynamic "ami_block_device_mappings" {
+    for_each = var.ami_block_device_mappings
+
+    content {
+      delete_on_termination = try(ami_block_device_mappings.value.delete_on_termination, true)
+      device_name           = try(ami_block_device_mappings.value.device_name, null)
+      encrypted             = try(ami_block_device_mappings.value.encrypted, null)
+      iops                  = try(ami_block_device_mappings.value.iops, null)
+      no_device             = try(ami_block_device_mappings.value.no_device, null)
+      snapshot_id           = try(ami_block_device_mappings.value.snapshot_id, null)
+      throughput            = try(ami_block_device_mappings.value.throughput, null)
+      virtual_name          = try(ami_block_device_mappings.value.virtual_name, null)
+      volume_size           = try(ami_block_device_mappings.value.volume_size, 4)
+      volume_type           = try(ami_block_device_mappings.value.volume_type, "gp3")
+      kms_key_id            = try(ami_block_device_mappings.value.kms_key_id, null)
+    }
+  }
+
+  ami_description         = var.ami_description
+  ami_groups              = var.ami_groups
+  ami_name                = local.ami_name
+  ami_org_arns            = var.ami_org_arns
+  ami_ou_arns             = var.ami_ou_arns
+  ami_regions             = var.ami_regions
+  ami_users               = var.ami_users
+  ami_virtualization_type = var.ami_virtualization_type
+  deprecate_at            = var.deprecate_at
+  ena_support             = var.ena_support
+  encrypt_boot            = var.encrypt_boot
+  force_deregister        = var.force_deregister
+  force_delete_snapshot   = var.force_delete_snapshot
+  imds_support            = var.imds_support
+  kms_key_id              = var.kms_key_id
+
+  dynamic "launch_block_device_mappings" {
+    for_each = length(var.launch_block_device_mappings) > 0 ? var.launch_block_device_mappings : var.ami_block_device_mappings
+
+    content {
+      delete_on_termination = try(launch_block_device_mappings.value.delete_on_termination, true)
+      device_name           = try(launch_block_device_mappings.value.device_name, null)
+      encrypted             = try(launch_block_device_mappings.value.encrypted, null)
+      iops                  = try(launch_block_device_mappings.value.iops, null)
+      no_device             = try(launch_block_device_mappings.value.no_device, null)
+      snapshot_id           = try(launch_block_device_mappings.value.snapshot_id, null)
+      throughput            = try(launch_block_device_mappings.value.throughput, null)
+      virtual_name          = try(launch_block_device_mappings.value.virtual_name, null)
+      volume_size           = try(launch_block_device_mappings.value.volume_size, 4)
+      volume_type           = try(launch_block_device_mappings.value.volume_type, "gp3")
+    }
+  }
+
+  region_kms_key_ids     = var.region_kms_key_ids
+  run_volume_tags        = var.run_volume_tags
+  skip_create_ami        = var.skip_create_ami
+  skip_region_validation = var.skip_region_validation
+  skip_save_build_region = var.skip_save_build_region
+  sriov_support          = var.sriov_support
+  snapshot_groups        = var.snapshot_groups
+  snapshot_tags          = var.snapshot_tags
+  snapshot_users         = var.snapshot_users
+  tags                   = merge(local.tags, var.tags)
+
+  # Access Configuration
+  access_key = var.access_key
+
+  dynamic "assume_role" {
+    for_each = length(var.assume_role) > 0 ? [var.assume_role] : []
+
+    content {
+      duration_seconds    = try(assume_role.value.duration_seconds, null)
+      external_id         = try(assume_role.value.external_id, null)
+      policy              = try(assume_role.value.policy, null)
+      policy_arns         = try(assume_role.value.policy_arns, null)
+      role_arn            = try(assume_role.value.role_arn, null)
+      session_name        = try(assume_role.value.session_name, null)
+      tag                 = try(assume_role.value.tag, null)
+      transitive_tag_keys = try(assume_role.value.transitive_tag_keys, null)
+    }
+  }
+
+  dynamic "aws_polling" {
+    for_each = length(var.aws_polling) > 0 ? [var.aws_polling] : []
+
+    content {
+      delay_seconds = try(aws_polling.value.delay_seconds, null)
+      max_attempts  = try(aws_polling.value.max_attempts, null)
+    }
+  }
+
+  custom_endpoint_ec2           = var.custom_endpoint_ec2
+  decode_authorization_messages = var.decode_authorization_messages
+  insecure_skip_tls_verify      = var.insecure_skip_tls_verify
+  max_retries                   = var.max_retries
+  mfa_code                      = var.mfa_code
+  profile                       = var.profile
+  region                        = var.region
+  secret_key                    = var.secret_key
+  shared_credentials_file       = var.shared_credentials_file
+  skip_credential_validation    = var.skip_credential_validation
+  skip_metadata_api_check       = var.skip_metadata_api_check
+  token                         = var.token
+
+  # Communicator
+  communicator                 = var.communicator
+  pause_before_connecting      = var.pause_before_connecting
+  ssh_agent_auth               = var.ssh_agent_auth
+  ssh_bastion_agent_auth       = var.ssh_bastion_agent_auth
+  ssh_bastion_certificate_file = var.ssh_bastion_certificate_file
+  ssh_bastion_host             = var.ssh_bastion_host
+  ssh_bastion_interactive      = var.ssh_bastion_interactive
+  ssh_bastion_password         = var.ssh_bastion_password
+  ssh_bastion_port             = var.ssh_bastion_port
+  ssh_bastion_private_key_file = var.ssh_bastion_private_key_file
+  ssh_bastion_username         = var.ssh_bastion_username
+  ssh_ciphers                  = var.ssh_ciphers
+  ssh_certificate_file         = var.ssh_certificate_file
+  ssh_clear_authorized_keys    = var.ssh_clear_authorized_keys
+  ssh_disable_agent_forwarding = var.ssh_disable_agent_forwarding
+  ssh_file_transfer_method     = var.ssh_file_transfer_method
+  ssh_handshake_attempts       = var.ssh_handshake_attempts
+  ssh_host                     = var.ssh_host
+  ssh_interface                = var.ssh_interface # "public_dns"
+  ssh_keep_alive_interval      = var.ssh_keep_alive_interval
+  ssh_key_exchange_algorithms  = var.ssh_key_exchange_algorithms
+  ssh_keypair_name             = var.ssh_keypair_name
+  ssh_local_tunnels            = var.ssh_local_tunnels
+  ssh_password                 = var.ssh_password
+  ssh_port                     = var.ssh_port
+  ssh_private_key_file         = var.ssh_private_key_file
+  ssh_proxy_host               = var.ssh_proxy_host
+  ssh_proxy_password           = var.ssh_proxy_password
+  ssh_proxy_port               = var.ssh_proxy_port
+  ssh_proxy_username           = var.ssh_proxy_username
+  ssh_pty                      = var.ssh_pty
+  ssh_read_write_timeout       = var.ssh_read_write_timeout
+  ssh_remote_tunnels           = var.ssh_remote_tunnels
+  ssh_timeout                  = var.ssh_timeout
+  ssh_username                 = var.ssh_username
+  temporary_key_pair_bits      = var.temporary_key_pair_bits
+  temporary_key_pair_type      = var.temporary_key_pair_type
+
+  # Run Configuration
+  associate_public_ip_address     = var.associate_public_ip_address
+  capacity_reservation_preference = var.capacity_reservation_preference
+  capacity_reservation_group_arn  = var.capacity_reservation_group_arn
+  capacity_reservation_id         = var.capacity_reservation_id
+  disable_stop_instance           = var.disable_stop_instance
+  ebs_optimized                   = var.ebs_optimized
+  enable_nitro_enclave            = var.enable_nitro_enclave
+  enable_unlimited_credits        = var.enable_unlimited_credits
+  iam_instance_profile            = var.iam_instance_profile
+  instance_type                   = var.instance_type
+  fleet_tags                      = var.fleet_tags
+  pause_before_ssm                = var.pause_before_ssm
+
+  dynamic "placement" {
+    for_each = length(var.placement) > 0 ? [var.placement] : []
+
+    content {
+      host_resource_group_arn = try(placement.value.host_resource_group_arn, null)
+      tenancy                 = try(placement.value.tenancy, null)
+    }
+  }
+
+  run_tags           = merge(local.tags, var.run_tags)
+  security_group_ids = var.security_group_ids
+
+  dynamic "security_group_filter" {
+    for_each = length(var.security_group_filter) > 0 ? var.security_group_filter : []
+
+    content {
+      filters = try(security_group_filter.value.filters, null)
+    }
+  }
+
+  session_manager_port    = var.session_manager_port
+  shutdown_behavior       = var.shutdown_behavior
+  skip_profile_validation = var.skip_profile_validation
+  source_ami              = data.amazon-parameterstore.this.value
+
+  dynamic "subnet_filter" {
+    for_each = length(var.subnet_filter) > 0 ? [var.subnet_filter] : []
+
+    content {
+      filters   = try(subnet_filter.value.filters, null)
+      most_free = try(subnet_filter.value.most_free, null)
+      random    = try(subnet_filter.value.random, null)
+    }
+  }
+
+  subnet_id = var.subnet_id
+
+  dynamic "temporary_iam_instance_profile_policy_document" {
+    for_each = length(var.temporary_iam_instance_profile_policy_document) > 0 ? [var.temporary_iam_instance_profile_policy_document] : []
+
+    content {
+      dynamic "Statement" {
+        for_each = temporary_iam_instance_profile_policy_document.value
+
+        content {
+          Action   = try(Statement.value.Action, [])
+          Effect   = try(Statement.value.Effect, "Allow")
+          Resource = try(Statement.value.Resource, ["*"])
+        }
+      }
+      Version = "2012-10-17"
+    }
+  }
+
+  temporary_security_group_source_cidrs     = var.temporary_security_group_source_cidrs
+  temporary_security_group_source_public_ip = var.temporary_security_group_source_public_ip
+  user_data                                 = var.user_data
+  user_data_file                            = var.user_data_file
+
+  dynamic "vpc_filter" {
+    for_each = length(var.vpc_filter) > 0 ? var.vpc_filter : []
+
+    content {
+      filters = try(vpc_filter.value.filters, null)
+    }
+  }
+
+  vpc_id = var.vpc_id
+
+  dynamic "metadata_options" {
+    for_each = length(var.metadata_options) > 0 ? [var.metadata_options] : []
+
+    content {
+      http_endpoint               = try(metadata_options.value.http_endpoint, null)
+      http_put_response_hop_limit = try(metadata_options.value.http_put_response_hop_limit, null)
+      http_tokens                 = try(metadata_options.value.http_tokens, null)
+      instance_metadata_tags      = try(metadata_options.value.instance_metadata_tags, null)
+    }
+  }
+}
+
+################################################################################
+# Build
+################################################################################
+
+build {
+  sources = ["source.amazon-ebs.this"]
+
+  provisioner "shell" {
+    execute_command = "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'"
+
+    env = {
+      DEBIAN_FRONTEND = "noninteractive"
+    }
+
+    expect_disconnect = true
+
+    inline = [
+      "cloud-init status --wait",
+      "apt update",
+      "apt upgrade -y",
+      "apt install iptables conntrack -y",
+      "systemctl reboot",
+    ]
+
+    pause_after = "15s"
+  }
+
+  provisioner "shell" {
+    execute_command = "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'"
+
+    env = {
+      DEBIAN_FRONTEND = "noninteractive"
+    }
+
+    inline = [
+
+      "snap install aws-cli --classic",
+      "snap switch --channel=candidate amazon-ssm-agent",
+      "curl -OL 'https://hybrid-assets.eks.amazonaws.com/releases/latest/bin/linux/amd64/nodeadm'",
+      "mv nodeadm /usr/bin/nodeadm",
+      "chmod +x /usr/bin/nodeadm",
+      "nodeadm install ${var.eks_version} --credential-provider ${var.credential_provider}",
+    ]
+  }
+
+  provisioner "shell" {
+    execute_command = "echo 'packer' | sudo -S sh -c '{{ .Vars }} {{ .Path }}'"
+
+    env = {
+      DEBIAN_FRONTEND = "noninteractive"
+    }
+
+    inline = [
+      "apt --purge autoremove -y",
+      "cloud-init clean --logs --machine-id",
+      "mkdir -p /etc/amazon/ssm",
+      "cp $(find / -name '*seelog.xml.template') /etc/amazon/ssm/seelog.xml",
+    ]
+  }
+}
diff --git a/examples/eks-hybrid-nodes/ami/plugins.pkr.hcl b/examples/eks-hybrid-nodes/ami/plugins.pkr.hcl
new file mode 100644
index 0000000000..24a7f5b8f5
--- /dev/null
+++ b/examples/eks-hybrid-nodes/ami/plugins.pkr.hcl
@@ -0,0 +1,8 @@
+packer {
+  required_plugins {
+    amazon = {
+      version = "~> 1.2"
+      source  = "github.com/hashicorp/amazon"
+    }
+  }
+}
diff --git a/examples/eks-hybrid-nodes/ami/variables.pkr.hcl b/examples/eks-hybrid-nodes/ami/variables.pkr.hcl
new file mode 100644
index 0000000000..1eb9c4726b
--- /dev/null
+++ b/examples/eks-hybrid-nodes/ami/variables.pkr.hcl
@@ -0,0 +1,723 @@
+variable "ami_name_prefix" {
+  description = "The prefix to use when creating the AMI name. i.e. - `<ami_name_prefix>-<eks_version>-<architecture>-<timestamp>"
+  type        = string
+  default     = "eks-hybrid-ubuntu"
+}
+
+variable "eks_version" {
+  description = "The EKS cluster version associated with the AMI created"
+  type        = string
+  default     = "1.33"
+}
+
+variable "credential_provider" {
+  description = "The credential provider to use with the Hybrid Node role"
+  type        = string
+  default     = "ssm"
+}
+
+variable "cpu_architecture" {
+  description = "The CPU architecture. Either `amd64` or `arm64`"
+  type        = string
+  default     = "amd64"
+}
+
+################################################################################
+# EBS Source
+################################################################################
+
+variable "ami_block_device_mappings" {
+  description = "The block device mappings attached when booting a new instance from the AMI created"
+  type        = list(map(string))
+  default = [
+    {
+      device_name           = "/dev/sda1"
+      volume_size           = 24
+      volume_type           = "gp3"
+      delete_on_termination = true
+    },
+  ]
+}
+
+variable "ami_description" {
+  description = "The description to use when creating the AMI"
+  type        = string
+  default     = "EKS Hybrid Node demonstration AMI"
+}
+
+variable "ami_groups" {
+  description = "A list of groups that have access to launch the resulting AMI(s). By default no groups have permission to launch the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't accept any value other than `all`"
+  type        = list(string)
+  default     = null
+}
+
+variable "ami_org_arns" {
+  description = "A list of Amazon Resource Names (ARN) of AWS Organizations that have access to launch the resulting AMI(s). By default no organizations have permission to launch the AMI"
+  type        = list(string)
+  default     = null
+}
+
+variable "ami_ou_arns" {
+  description = "A list of Amazon Resource Names (ARN) of AWS Organizations organizational units (OU) that have access to launch the resulting AMI(s). By default no organizational units have permission to launch the AMI"
+  type        = list(string)
+  default     = null
+}
+
+variable "ami_regions" {
+  description = "A list of regions to copy the AMI to. Tags and attributes are copied along with the AMI. AMI copying takes time depending on the size of the AMI, but will generally take many minutes"
+  type        = list(string)
+  default     = null
+}
+
+variable "ami_users" {
+  description = "A list of account IDs that have access to launch the resulting AMI(s). By default no additional users other than the user creating the AMI has permissions to launch it"
+  type        = list(string)
+  default     = null
+}
+
+variable "ami_virtualization_type" {
+  description = "The type of virtualization used to create the AMI. Can be one of `hvm` or `paravirtual`"
+  type        = string
+  default     = "hvm"
+}
+
+variable "deprecate_at" {
+  description = "The date and time to deprecate the AMI, in UTC, in the following format: YYYY-MM-DDTHH:MM:SSZ. If you specify a value for seconds, Amazon EC2 rounds the seconds to the nearest minute"
+  type        = string
+  default     = null
+}
+
+variable "ena_support" {
+  description = "Enable enhanced networking (ENA but not SriovNetSupport) on HVM-compatible AMIs"
+  type        = bool
+  default     = null
+}
+
+variable "encrypt_boot" {
+  description = "Whether or not to encrypt the resulting AMI when copying a provisioned instance to an AMI. By default, Packer will keep the encryption setting to what it was in the source image"
+  type        = bool
+  default     = null
+}
+
+variable "force_deregister" {
+  description = "Force Packer to first deregister an existing AMI if one with the same name already exists. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "force_delete_snapshot" {
+  description = "Force Packer to delete snapshots associated with AMIs, which have been deregistered by force_deregister. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "imds_support" {
+  description = "Enforce version of the Instance Metadata Service on the built AMI. Valid options are `unset` (legacy) and `v2.0`"
+  type        = string
+  default     = "v2.0"
+}
+
+variable "kms_key_id" {
+  description = "ID, alias or ARN of the KMS key to use for AMI encryption. This only applies to the main `region` -- any regions the AMI gets copied to copied will be encrypted by the default EBS KMS key for that region, unless you set region-specific keys in `region_kms_key_ids`"
+  type        = string
+  default     = null
+}
+
+variable "launch_block_device_mappings" {
+  description = "The block device mappings to use when creating the AMI. If you add instance store volumes or EBS volumes in addition to the root device volume, the created AMI will contain block device mapping information for those volumes. Amazon creates snapshots of the source instance's root volume and any other EBS volumes described here. When you launch an instance from this new AMI, the instance automatically launches with these additional volumes, and will restore them from snapshots taken from the source instance"
+  type        = list(map(string))
+  default = [
+    {
+      device_name           = "/dev/sda1"
+      volume_size           = 24
+      volume_type           = "gp3"
+      delete_on_termination = true
+    },
+  ]
+}
+
+variable "region_kms_key_ids" {
+  description = "regions to copy the ami to, along with the custom kms key id (alias or arn) to use for encryption for that region. Keys must match the regions provided in `ami_regions`"
+  type        = map(string)
+  default     = null
+}
+
+variable "run_volume_tags" {
+  description = "Tags to apply to the volumes that are launched to create the AMI. These tags are not applied to the resulting AMI"
+  type        = map(string)
+  default     = null
+}
+
+variable "skip_create_ami" {
+  description = "If `true`, Packer will not create the AMI. Useful for setting to `true` during a build test stage. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "skip_region_validation" {
+  description = "Set to `true` if you want to skip validation of the `ami_regions` configuration option. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "skip_save_build_region" {
+  description = "If true, Packer will not check whether an AMI with the ami_name exists in the region it is building in. It will use an intermediary AMI name, which it will not convert to an AMI in the build region. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "sriov_support" {
+  description = "Enable enhanced networking (SriovNetSupport but not ENA) on HVM-compatible AMIs"
+  type        = bool
+  default     = null
+}
+
+variable "snapshot_groups" {
+  description = "A list of groups that have access to create volumes from the snapshot(s). By default no groups have permission to create volumes from the snapshot(s). all will make the snapshot publicly accessible"
+  type        = list(string)
+  default     = null
+}
+
+variable "snapshot_tags" {
+  description = "Key/value pair tags to apply to snapshot. They will override AMI tags if already applied to snapshot"
+  type        = map(string)
+  default     = null
+}
+
+variable "snapshot_users" {
+  description = "A list of account IDs that have access to create volumes from the snapshot(s). By default no additional users other than the user creating the AMI has permissions to create volumes from the backing snapshot(s)"
+  type        = list(string)
+  default     = null
+}
+
+variable "tags" {
+  description = "Key/value pair tags applied to the AMI"
+  type        = map(string)
+  default     = {}
+}
+
+# Access Configuration
+
+variable "access_key" {
+  description = "The access key used to communicate with AWS"
+  type        = string
+  default     = null
+}
+
+variable "assume_role" {
+  description = "If provided with a role ARN, Packer will attempt to assume this role using the supplied credentials"
+  type        = map(string)
+  default     = {}
+}
+
+variable "aws_polling" {
+  description = "Polling configuration for the AWS waiter. Configures the waiter for resources creation or actions like attaching volumes or importing image"
+  type        = map(string)
+  default     = {}
+}
+
+variable "custom_endpoint_ec2" {
+  description = "This option is useful if you use a cloud provider whose API is compatible with aws EC2"
+  type        = string
+  default     = null
+}
+
+variable "decode_authorization_messages" {
+  description = "Enable automatic decoding of any encoded authorization (error) messages using the sts:DecodeAuthorizationMessage API"
+  type        = bool
+  default     = null
+}
+
+variable "insecure_skip_tls_verify" {
+  description = "This allows skipping TLS verification of the AWS EC2 endpoint. The default is `false`"
+  type        = bool
+  default     = null
+}
+
+variable "max_retries" {
+  description = "This is the maximum number of times an API call is retried, in the case where requests are being throttled or experiencing transient failures. The delay between the subsequent API calls increases exponentially"
+  type        = number
+  default     = null
+}
+
+variable "mfa_code" {
+  description = "The MFA TOTP code. This should probably be a user variable since it changes all the time"
+  type        = string
+  default     = null
+}
+
+variable "profile" {
+  description = "The profile to use in the shared credentials file for AWS"
+  type        = string
+  default     = null
+}
+
+variable "region" {
+  description = "The name of the region, such as us-east-1, in which to launch the EC2 instance to create the AMI"
+  type        = string
+  default     = "us-east-1"
+}
+
+variable "secret_key" {
+  description = "The secret key used to communicate with AWS"
+  type        = string
+  default     = null
+}
+
+variable "shared_credentials_file" {
+  description = "Path to a credentials file to load credentials from"
+  type        = string
+  default     = null
+}
+
+variable "skip_credential_validation" {
+  description = "Set to true if you want to skip validating AWS credentials before runtime"
+  type        = bool
+  default     = null
+}
+
+variable "skip_metadata_api_check" {
+  description = "Skip Metadata Api Check"
+  type        = bool
+  default     = null
+}
+
+variable "token" {
+  description = "The access token to use. This is different from the access key and secret key"
+  type        = string
+  default     = null
+}
+
+# Communicator
+
+variable "communicator" {
+  description = "The communicator to use to communicate with the EC2 instance. Valid values are `none`, `ssh`, `winrm`, and `ssh+winrm`"
+  type        = string
+  default     = "ssh"
+}
+
+variable "pause_before_connecting" {
+  description = "We recommend that you enable SSH or WinRM as the very last step in your guest's bootstrap script, but sometimes you may have a race condition where you need Packer to wait before attempting to connect to your guest"
+  type        = string
+  default     = null
+}
+
+variable "ssh_agent_auth" {
+  description = "If true, the local SSH agent will be used to authenticate connections to the source instance. No temporary keypair will be created, and the values of `ssh_password` and `ssh_private_key_file` will be ignored. The environment variable `SSH_AUTH_SOCK` must be set for this option to work properly"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_bastion_agent_auth" {
+  description = "If `true`, the local SSH agent will be used to authenticate with the bastion host. Defaults to `false`"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_bastion_certificate_file" {
+  description = "Path to user certificate used to authenticate with bastion host. The ~ can be used in path and will be expanded to the home directory of current user"
+  type        = string
+  default     = null
+}
+
+variable "ssh_bastion_host" {
+  description = "A bastion host to use for the actual SSH connection"
+  type        = string
+  default     = null
+}
+
+variable "ssh_bastion_interactive" {
+  description = "If `true`, the keyboard-interactive used to authenticate with bastion host"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_bastion_password" {
+  description = "The password to use to authenticate with the bastion host"
+  type        = string
+  default     = null
+}
+
+variable "ssh_bastion_port" {
+  description = "The port of the bastion host. Defaults to `22`"
+  type        = number
+  default     = null
+}
+
+variable "ssh_bastion_private_key_file" {
+  description = "Path to a PEM encoded private key file to use to authenticate with the bastion host. The `~` can be used in path and will be expanded to the home directory of current user"
+  type        = string
+  default     = null
+}
+
+variable "ssh_bastion_username" {
+  description = "The username to connect to the bastion host"
+  type        = string
+  default     = null
+}
+
+variable "ssh_ciphers" {
+  description = "This overrides the value of ciphers supported by default by Golang. The default value is `[\"aes128-gcm@openssh.com\", \"chacha20-poly1305@openssh.com\", \"aes128-ctr\", \"aes192-ctr\", \"aes256-ctr\"]`"
+  type        = list(string)
+  default     = null
+}
+
+variable "ssh_certificate_file" {
+  description = "Path to user certificate used to authenticate with SSH. The `~` can be used in path and will be expanded to the home directory of current user"
+  type        = string
+  default     = null
+}
+
+variable "ssh_clear_authorized_keys" {
+  description = "If true, Packer will attempt to remove its temporary key from `~/.ssh/authorized_keys` and `/root/.ssh/authorized_keys`"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_disable_agent_forwarding" {
+  description = "If `true`, SSH agent forwarding will be disabled. Defaults to `false`"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_file_transfer_method" {
+  description = "How to transfer files, Secure copy (`scp` default) or SSH File Transfer Protocol (`sftp`)"
+  type        = string
+  default     = null
+}
+
+variable "ssh_handshake_attempts" {
+  description = "The number of handshakes to attempt with SSH once it can connect. This defaults to `10`, unless a `ssh_timeout` is set"
+  type        = number
+  default     = null
+}
+
+variable "ssh_host" {
+  description = "The address to SSH to. This usually is automatically configured by the builder"
+  type        = string
+  default     = null
+}
+
+variable "ssh_interface" {
+  description = "One of `public_ip`, `private_ip`, `public_dns`, `private_dns` or `session_manager`. If set, either the public IP address, private IP address, public DNS name or private DNS name will be used as the host for SSH. The default behavior if inside a VPC is to use the public IP address if available, otherwise the private IP address will be used. If not in a VPC the public DNS name will be used"
+  type        = string
+  default     = "public_ip"
+}
+
+variable "ssh_keep_alive_interval" {
+  description = "How often to send \"keep alive\" messages to the server. Set to a negative value (`-1s`) to disable. Defaults to `5s`"
+  type        = string
+  default     = null
+}
+
+variable "ssh_key_exchange_algorithms" {
+  description = "If set, Packer will override the value of key exchange (kex) algorithms supported by default by Golang. Acceptable values include: `curve25519-sha256@libssh.org`, `ecdh-sha2-nistp256`, `ecdh-sha2-nistp384`, `ecdh-sha2-nistp521`, `diffie-hellman-group14-sha1`, and `diffie-hellman-group1-sha1`"
+  type        = list(string)
+  default     = null
+}
+
+variable "ssh_keypair_name" {
+  description = "If specified, this is the key that will be used for SSH with the machine. The key must match a key pair name loaded up into the remote"
+  type        = string
+  default     = null
+}
+
+variable "ssh_local_tunnels" {
+  description = "A list of local tunnels to use when connecting to the host"
+  type        = list(string)
+  default     = null
+}
+
+variable "ssh_password" {
+  description = "A plaintext password to use to authenticate with SSH"
+  type        = string
+  default     = null
+}
+
+variable "ssh_port" {
+  description = "The port to connect to SSH. This defaults to `22`"
+  type        = number
+  default     = null
+}
+
+variable "ssh_private_key_file" {
+  description = "Path to a PEM encoded private key file to use to authenticate with SSH. The ~ can be used in path and will be expanded to the home directory of current user"
+  type        = string
+  default     = null
+}
+
+variable "ssh_proxy_host" {
+  description = "A SOCKS proxy host to use for SSH connection"
+  type        = string
+  default     = null
+}
+
+variable "ssh_proxy_password" {
+  description = "The optional password to use to authenticate with the proxy server"
+  type        = string
+  default     = null
+}
+
+variable "ssh_proxy_port" {
+  description = "A port of the SOCKS proxy. Defaults to `1080`"
+  type        = number
+  default     = null
+}
+
+variable "ssh_proxy_username" {
+  description = "The optional username to authenticate with the proxy server"
+  type        = string
+  default     = null
+}
+
+variable "ssh_pty" {
+  description = "If `true`, a PTY will be requested for the SSH connection. This defaults to `false`"
+  type        = bool
+  default     = null
+}
+
+variable "ssh_read_write_timeout" {
+  description = "The amount of time to wait for a remote command to end. This might be useful if, for example, packer hangs on a connection after a reboot. Example: `5m`. Disabled by default"
+  type        = string
+  default     = null
+}
+
+variable "ssh_remote_tunnels" {
+  description = "A list of remote tunnels to use when connecting to the host"
+  type        = list(string)
+  default     = null
+}
+
+variable "ssh_timeout" {
+  description = "The time to wait for SSH to become available. Packer uses this to determine when the machine has booted so this is usually quite long. This defaults to `5m`, unless `ssh_handshake_attempts` is set"
+  type        = string
+  default     = null
+}
+
+variable "ssh_username" {
+  description = "The username to connect to SSH with. Required if using SSH"
+  type        = string
+  default     = "ubuntu"
+}
+
+variable "temporary_key_pair_type" {
+  description = "Specifies the type of key to create. The possible values are 'dsa', 'ecdsa', 'ed25519', or 'rsa'. Default is `ed25519`"
+  type        = string
+  default     = "ed25519"
+}
+
+variable "temporary_key_pair_bits" {
+  description = "Specifies the number of bits in the key to create. For RSA keys, the minimum size is 1024 bits and the default is 4096 bits. Generally, 3072 bits is considered sufficient"
+  type        = number
+  default     = null
+}
+
+# Run Configuration
+
+variable "associate_public_ip_address" {
+  description = "If using a non-default VPC, public IP addresses are not provided by default. If this is true, your new instance will get a Public IP"
+  type        = bool
+  default     = true
+}
+
+variable "capacity_reservation_preference" {
+  description = "Set the preference for using a capacity reservation if one exists. Either will be `open` or `none`. Defaults to `none`"
+  type        = string
+  default     = null
+}
+
+variable "capacity_reservation_group_arn" {
+  description = "Provide the EC2 Capacity Reservation Group ARN that will be used by Packer"
+  type        = string
+  default     = null
+}
+
+variable "capacity_reservation_id" {
+  description = "Provide the specific EC2 Capacity Reservation ID that will be used by Packer"
+  type        = string
+  default     = null
+}
+
+variable "disable_stop_instance" {
+  description = "If this is set to true, Packer will not stop the instance but will assume that you will send the stop signal yourself through your final provisioner"
+  type        = bool
+  default     = null
+}
+
+variable "ebs_optimized" {
+  description = "Mark instance as EBS Optimized. Default `false`"
+  type        = bool
+  default     = null
+}
+
+variable "enable_nitro_enclave" {
+  description = "Enable support for Nitro Enclaves on the instance"
+  type        = bool
+  default     = null
+}
+
+variable "enable_unlimited_credits" {
+  description = "Enabling Unlimited credits allows the source instance to burst additional CPU beyond its available CPU Credits for as long as the demand exists"
+  type        = bool
+  default     = null
+}
+
+variable "iam_instance_profile" {
+  description = "The name of an IAM instance profile to launch the EC2 instance with"
+  type        = string
+  default     = null
+}
+
+variable "instance_type" {
+  description = "The EC2 instance type to use while building the AMI, such as `m5.large`"
+  type        = string
+  default     = "c5.xlarge"
+}
+
+variable "fleet_tags" {
+  description = "Key/value pair tags to apply tags to the fleet that is issued"
+  type        = map(string)
+  default     = null
+}
+
+variable "pause_before_ssm" {
+  description = "The time to wait before establishing the Session Manager session"
+  type        = string
+  default     = null
+}
+
+variable "placement" {
+  description = "Describes the placement of an instance"
+  type        = map(string)
+  default     = {}
+}
+
+variable "run_tags" {
+  description = "Key/value pair tags to apply to the generated key-pair, security group, iam profile and role, snapshot, network interfaces and instance that is launched to create the EBS volumes. The resulting AMI will also inherit these tags"
+  type        = map(string)
+  default     = null
+}
+
+variable "security_group_ids" {
+  description = "A list of security group IDs to assign to the instance. By default this is not set and Packer will automatically create a new temporary security group to allow SSH access"
+  type        = list(string)
+  default     = null
+}
+
+variable "security_group_filter" {
+  description = "Filters used to populate the `security_group_ids` field. `security_group_ids` take precedence over this"
+  type        = list(map(string))
+  default     = []
+}
+
+variable "session_manager_port" {
+  description = "Which port to connect the local end of the session tunnel to. If left blank, Packer will choose a port for you from available ports. This option is only used when `ssh_interface` is set `session_manager`"
+  type        = number
+  default     = null
+}
+
+variable "shutdown_behavior" {
+  description = "Automatically terminate instances on shutdown in case Packer exits ungracefully. Possible values are `stop` and `terminate`. Defaults to `stop`"
+  type        = string
+  default     = null
+}
+
+variable "skip_profile_validation" {
+  description = "Whether or not to check if the IAM instance profile exists. Defaults to `false`"
+  type        = bool
+  default     = null
+}
+
+variable "subnet_filter" {
+  description = "Filters used to populate the subnet_id field. `subnet_id` take precedence over this"
+  default = {
+    filters = {
+      "tag:eks-hybrid-packer" = "true"
+    }
+    random = true
+  }
+}
+
+variable "subnet_id" {
+  description = "f using VPC, the ID of the subnet, such as subnet-12345def, where Packer will launch the EC2 instance. This field is required if you are using an non-default VPC"
+  type        = string
+  default     = null
+}
+
+variable "temporary_iam_instance_profile_policy_document" {
+  description = "Creates a temporary instance profile policy document to grant permissions to the EC2 instance. This is an alternative to using an existing `iam_instance_profile`"
+  default = [
+    {
+      Effect = "Allow"
+      Action = [
+        "ec2:Describe*",
+      ]
+      Resource = ["*"]
+    },
+  ]
+}
+
+variable "temporary_security_group_source_cidrs" {
+  description = "A list of IPv4 CIDR blocks to be authorized access to the instance, when packer is creating a temporary security group. The default is `[0.0.0.0/0]`"
+  type        = list(string)
+  default     = null
+}
+
+variable "temporary_security_group_source_public_ip" {
+  description = "When enabled, use public IP of the host (obtained from https://checkip.amazonaws.com) as CIDR block to be authorized access to the instance, when packer is creating a temporary security group. Defaults to `false`"
+  type        = bool
+  default     = null
+}
+
+variable "user_data" {
+  description = "User data to apply when launching the instance"
+  type        = string
+  default     = null
+}
+
+variable "user_data_file" {
+  description = "Path to a file that will be used for the user data when launching the instance"
+  type        = string
+  default     = null
+}
+
+variable "vpc_filter" {
+  description = "Filters used to populate the `vpc_id` field. `vpc_id` take precedence over this"
+  type        = list(map(string))
+  default     = []
+}
+
+variable "vpc_id" {
+  description = "If launching into a VPC subnet, Packer needs the VPC ID in order to create a temporary security group within the VPC. Requires `subnet_id` to be set. If this field is left blank, Packer will try to get the VPC ID from the `subnet_id`"
+  type        = string
+  default     = null
+}
+
+variable "metadata_options" {
+  description = "Configures the metadata options for the instance launched"
+  type        = map(string)
+  default = {
+    http_endpoint               = "enabled"
+    http_tokens                 = "required"
+    http_put_response_hop_limit = 1
+  }
+}
+
+################################################################################
+# Build
+################################################################################
+
+variable "shell_provisioner1" {
+  description = "Values passed to the first shell provisioner"
+  default     = {}
+}
+
+variable "shell_provisioner2" {
+  description = "Values passed to the second shell provisioner"
+  default     = {}
+}
+
+variable "shell_provisioner3" {
+  description = "Values passed to the third/last shell provisioner"
+  default     = {}
+}
diff --git a/examples/eks-hybrid-nodes/main.tf b/examples/eks-hybrid-nodes/main.tf
new file mode 100644
index 0000000000..d23f05e370
--- /dev/null
+++ b/examples/eks-hybrid-nodes/main.tf
@@ -0,0 +1,148 @@
+provider "aws" {
+  region = local.region
+}
+
+provider "helm" {
+  kubernetes = {
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+    exec = {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      # This requires the awscli to be installed locally where Terraform is executed
+      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
+    }
+  }
+}
+
+locals {
+  name   = "ex-${basename(path.cwd)}"
+  region = "us-west-2"
+
+  kubernetes_version = "1.33"
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Cluster
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name               = local.name
+  kubernetes_version = local.kubernetes_version
+
+  endpoint_public_access                   = true
+  enable_cluster_creator_admin_permissions = true
+
+  addons = {
+    coredns                = {}
+    eks-pod-identity-agent = {}
+    kube-proxy             = {}
+  }
+
+  create_node_security_group = false
+  security_group_additional_rules = {
+    hybrid-all = {
+      cidr_blocks = [local.remote_network_cidr]
+      description = "Allow all traffic from remote node/pod network"
+      from_port   = 0
+      to_port     = 0
+      protocol    = "all"
+      type        = "ingress"
+    }
+  }
+
+  compute_config = {
+    enabled    = true
+    node_pools = ["system"]
+  }
+
+  access_entries = {
+    hybrid-node-role = {
+      principal_arn = module.eks_hybrid_node_role.arn
+      type          = "HYBRID_LINUX"
+    }
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  remote_network_config = {
+    remote_node_networks = {
+      cidrs = [local.remote_node_cidr]
+    }
+    remote_pod_networks = {
+      cidrs = [local.remote_pod_cidr]
+    }
+  }
+
+  tags = local.tags
+}
+
+################################################################################
+# VPC
+################################################################################
+
+locals {
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
+
+################################################################################
+# VPC Peering Connection
+################################################################################
+
+resource "aws_vpc_peering_connection_accepter" "peer" {
+  vpc_peering_connection_id = aws_vpc_peering_connection.remote_node.id
+  auto_accept               = true
+
+  tags = local.tags
+}
+
+resource "aws_route" "peer" {
+  route_table_id            = one(module.vpc.private_route_table_ids)
+  destination_cidr_block    = local.remote_network_cidr
+  vpc_peering_connection_id = aws_vpc_peering_connection.remote_node.id
+}
diff --git a/examples/eks-hybrid-nodes/outputs.tf b/examples/eks-hybrid-nodes/outputs.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/eks-hybrid-nodes/remote.tf b/examples/eks-hybrid-nodes/remote.tf
new file mode 100644
index 0000000000..5aaf85ec96
--- /dev/null
+++ b/examples/eks-hybrid-nodes/remote.tf
@@ -0,0 +1,314 @@
+provider "aws" {
+  alias  = "remote"
+  region = "us-east-1"
+}
+
+################################################################################
+# Hybrid Node IAM Module
+################################################################################
+
+module "eks_hybrid_node_role" {
+  source = "../../modules/hybrid-node-role"
+
+  tags = local.tags
+}
+
+################################################################################
+# Pseudo Hybrid Node
+# Demonstration only - AWS EC2 instances are not supported for EKS Hybrid nodes
+################################################################################
+
+# Activation should be done in the same region as the cluster
+resource "aws_ssm_activation" "this" {
+  name               = "hybrid-node"
+  iam_role           = module.eks_hybrid_node_role.name
+  registration_limit = 10
+
+  tags = local.tags
+}
+
+module "key_pair" {
+  source  = "terraform-aws-modules/key-pair/aws"
+  version = "~> 2.0"
+
+  providers = {
+    aws = aws.remote
+  }
+
+  key_name           = "hybrid-node"
+  create_private_key = true
+
+  tags = local.tags
+}
+
+resource "local_file" "key_pem" {
+  content         = module.key_pair.private_key_pem
+  filename        = "key.pem"
+  file_permission = "0600"
+}
+
+resource "local_file" "key_pub_pem" {
+  content         = module.key_pair.public_key_pem
+  filename        = "key_pub.pem"
+  file_permission = "0600"
+}
+
+resource "local_file" "join" {
+  content  = <<-EOT
+    #!/usr/bin/env bash
+
+    cat <<EOF > nodeConfig.yaml
+    apiVersion: node.eks.aws/v1alpha1
+    kind: NodeConfig
+    spec:
+      cluster:
+        name: ${module.eks.cluster_name}
+        region: ${local.region}
+      hybrid:
+        ssm:
+          activationCode: ${aws_ssm_activation.this.activation_code}
+          activationId: ${aws_ssm_activation.this.id}
+    EOF
+
+    # Use SCP/SSH to execute commands on the remote host
+    scp -i ${local_file.key_pem.filename} nodeConfig.yaml ubuntu@${aws_instance.hybrid_node["one"].public_ip}:/home/ubuntu/nodeConfig.yaml
+    ssh -n -i ${local_file.key_pem.filename} ubuntu@${aws_instance.hybrid_node["one"].public_ip} sudo nodeadm init -c file://nodeConfig.yaml
+    ssh -n -i ${local_file.key_pem.filename} ubuntu@${aws_instance.hybrid_node["one"].public_ip} sudo systemctl daemon-reload
+
+    scp -i ${local_file.key_pem.filename} nodeConfig.yaml ubuntu@${aws_instance.hybrid_node["two"].public_ip}:/home/ubuntu/nodeConfig.yaml
+    ssh -n -i ${local_file.key_pem.filename} ubuntu@${aws_instance.hybrid_node["two"].public_ip} sudo nodeadm init -c file://nodeConfig.yaml
+    ssh -n -i ${local_file.key_pem.filename} ubuntu@${aws_instance.hybrid_node["two"].public_ip} sudo systemctl daemon-reload
+
+    # Clean up
+    rm nodeConfig.yaml
+  EOT
+  filename = "join.sh"
+}
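+
+# After `terraform apply` completes, the generated join.sh can be executed locally
+# (e.g. `bash join.sh`) to copy nodeConfig.yaml to each instance over SCP and run
+# `nodeadm init` via SSH. This assumes the machine running Terraform has SSH access
+# to the instances' public IPs (see the `ssh` ingress rule below).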
+
+data "aws_ami" "hybrid_node" {
+  provider = aws.remote
+
+  most_recent = true
+  name_regex  = "eks-hybrid-ubuntu-${local.kubernetes_version}-amd64-*"
+  owners      = ["self"]
+}
+
+# Demonstration only - AWS EC2 instances are not supported for EKS Hybrid nodes
+resource "aws_instance" "hybrid_node" {
+  provider = aws.remote
+
+  for_each = { one = 0, two = 1 }
+
+  ami                         = data.aws_ami.hybrid_node.id
+  associate_public_ip_address = true
+  instance_type               = "m5.large"
+
+  # Block IMDS to make instance look less like EC2 and more like vanilla VM
+  metadata_options {
+    http_endpoint = "disabled"
+  }
+
+  vpc_security_group_ids = [aws_security_group.remote_node.id]
+  subnet_id              = element(module.remote_node_vpc.public_subnets, each.value)
+
+  tags = merge(
+    local.tags,
+    { Name = "hybrid-node-${each.key}" }
+  )
+}
+
+################################################################################
+# Pseudo Hybrid Node - Security Group
+# Demonstration only - AWS EC2 instances are not supported for EKS Hybrid nodes
+################################################################################
+
+# Retrieve the public IP of where Terraform is running to restrict SSH access to that IP
+data "http" "icanhazip" {
+  url = "http://icanhazip.com"
+}
+
+resource "aws_security_group" "remote_node" {
+  provider = aws.remote
+
+  name                   = "hybrid-node"
+  vpc_id                 = module.remote_node_vpc.vpc_id
+  revoke_rules_on_delete = true
+
+  tags = merge(
+    local.tags,
+    { Name = "hybrid-node" }
+  )
+}
+
+resource "aws_vpc_security_group_ingress_rule" "remote_node" {
+  provider = aws.remote
+
+  for_each = {
+    cluster-all = {
+      description = "Allow all traffic from cluster network"
+      cidr_ipv4   = module.vpc.vpc_cidr_block
+      ip_protocol = "all"
+    }
+    remote-all = {
+      description                  = "Allow all traffic from within the remote network itself"
+      ip_protocol                  = "all"
+      referenced_security_group_id = aws_security_group.remote_node.id
+    }
+    # Restrict SSH access to only the IP where Terraform is running
+    ssh = {
+      description = "Local SSH access to join node to cluster"
+      cidr_ipv4   = "${chomp(data.http.icanhazip.response_body)}/32"
+      from_port   = "22"
+      ip_protocol = "tcp"
+    }
+  }
+
+  cidr_ipv4                    = try(each.value.cidr_ipv4, null)
+  from_port                    = try(each.value.from_port, null)
+  ip_protocol                  = try(each.value.ip_protocol, null)
+  to_port                      = try(each.value.to_port, each.value.from_port, null)
+  referenced_security_group_id = try(each.value.referenced_security_group_id, null)
+  security_group_id            = aws_security_group.remote_node.id
+
+  tags = merge(
+    local.tags,
+    { Name = "hybrid-node-${each.key}" }
+  )
+}
+
+resource "aws_vpc_security_group_egress_rule" "remote_node" {
+  provider = aws.remote
+
+  for_each = {
+    all = {
+      description = "Allow all egress"
+      cidr_ipv4   = "0.0.0.0/0"
+      ip_protocol = "all"
+    }
+  }
+
+  cidr_ipv4                    = try(each.value.cidr_ipv4, null)
+  from_port                    = try(each.value.from_port, null)
+  ip_protocol                  = try(each.value.ip_protocol, null)
+  to_port                      = try(each.value.to_port, each.value.from_port, null)
+  referenced_security_group_id = try(each.value.referenced_security_group_id, null)
+  security_group_id            = aws_security_group.remote_node.id
+
+  tags = merge(
+    local.tags,
+    { Name = "hybrid-node-${each.key}" }
+  )
+}
+
+################################################################################
+# Cilium CNI
+################################################################################
+
+resource "helm_release" "cilium" {
+  name       = "cilium"
+  repository = "https://helm.cilium.io/"
+  chart      = "cilium"
+  version    = "1.16.4"
+  namespace  = "kube-system"
+  wait       = false
+
+  values = [
+    <<-EOT
+      nodeSelector:
+        eks.amazonaws.com/compute-type: hybrid
+      ipam:
+        mode: cluster-pool
+        operator:
+          clusterPoolIPv4MaskSize: 26
+          clusterPoolIPv4PodCIDRList:
+            - ${local.remote_pod_cidr}
+      operator:
+        unmanagedPodWatcher:
+          restart: false
+    EOT
+  ]
+}
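+
+# Cilium is scheduled only onto the hybrid nodes via the
+# `eks.amazonaws.com/compute-type: hybrid` node selector, and allocates pod IPs from
+# the remote pod CIDR defined in the locals below, since the VPC CNI is not used for
+# hybrid nodes.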
+
+################################################################################
+# VPC
+################################################################################
+
+locals {
+  remote_network_cidr = "172.16.0.0/16"
+  remote_node_cidr    = cidrsubnet(local.remote_network_cidr, 2, 0)
+  remote_pod_cidr     = cidrsubnet(local.remote_network_cidr, 2, 1)
+
+  remote_node_azs = slice(data.aws_availability_zones.remote.names, 0, 3)
+}
+
+data "aws_availability_zones" "remote" {
+  provider = aws.remote
+
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+module "remote_node_vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  providers = {
+    aws = aws.remote
+  }
+
+  name = local.name
+  cidr = local.remote_network_cidr
+
+  azs             = local.remote_node_azs
+  private_subnets = [for k, v in local.remote_node_azs : cidrsubnet(local.remote_network_cidr, 4, k)]
+  public_subnets  = [for k, v in local.remote_node_azs : cidrsubnet(local.remote_network_cidr, 8, k + 48)]
+
+  public_subnet_tags = {
+    # For building the AMI
+    "eks-hybrid-packer" : "true"
+  }
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  tags = local.tags
+}
+
+################################################################################
+# VPC Peering Connection
+################################################################################
+
+resource "aws_vpc_peering_connection" "remote_node" {
+  provider = aws.remote
+
+  auto_accept = false
+
+  peer_vpc_id = module.vpc.vpc_id
+  peer_region = local.region
+
+  vpc_id = module.remote_node_vpc.vpc_id
+
+  tags = merge(
+    local.tags,
+    { Name = "remote-node" }
+  )
+}
+
+resource "aws_route" "remote_node_private" {
+  provider = aws.remote
+
+  route_table_id            = one(module.remote_node_vpc.private_route_table_ids)
+  destination_cidr_block    = module.vpc.vpc_cidr_block
+  vpc_peering_connection_id = aws_vpc_peering_connection.remote_node.id
+}
+
+resource "aws_route" "remote_node_public" {
+  provider = aws.remote
+
+  route_table_id            = one(module.remote_node_vpc.public_route_table_ids)
+  destination_cidr_block    = module.vpc.vpc_cidr_block
+  vpc_peering_connection_id = aws_vpc_peering_connection.remote_node.id
+}
diff --git a/examples/eks-hybrid-nodes/variables.tf b/examples/eks-hybrid-nodes/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/eks-hybrid-nodes/versions.tf b/examples/eks-hybrid-nodes/versions.tf
new file mode 100644
index 0000000000..29273c9ce0
--- /dev/null
+++ b/examples/eks-hybrid-nodes/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 3.0"
+    }
+    http = {
+      source  = "hashicorp/http"
+      version = ">= 3.4"
+    }
+    local = {
+      source  = "hashicorp/local"
+      version = ">= 2.5"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = ">= 4.0"
+    }
+  }
+}
diff --git a/examples/eks-managed-node-group/README.md b/examples/eks-managed-node-group/README.md
new file mode 100644
index 0000000000..16708dca42
--- /dev/null
+++ b/examples/eks-managed-node-group/README.md
@@ -0,0 +1,22 @@
+# EKS Managed Node Group Examples
+
+Configuration in this directory creates Amazon EKS clusters with EKS Managed Node Groups demonstrating different configurations:
+
+- `eks-al2023.tf` demonstrates an EKS cluster using an EKS managed node group that utilizes the Amazon Linux 2023 EKS optimized AMI
+- `eks-bottlerocket.tf` demonstrates an EKS cluster using an EKS managed node group that utilizes the Bottlerocket EKS optimized AMI
+
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for additional details on Amazon EKS managed node groups.
+
+Each cluster configuration example is provided in its own file and is independent of the other configurations.
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
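+
+Since each cluster configuration is independent, a run can optionally be scoped to a single cluster using Terraform's `-target` flag (a sketch, using the module name from `eks-al2023.tf`):
+
+```bash
+$ terraform apply -target=module.eks_al2023 --auto-approve
+```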
diff --git a/examples/eks-managed-node-group/eks-al2023.tf b/examples/eks-managed-node-group/eks-al2023.tf
new file mode 100644
index 0000000000..69897a5a04
--- /dev/null
+++ b/examples/eks-managed-node-group/eks-al2023.tf
@@ -0,0 +1,55 @@
+module "eks_al2023" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "${local.name}-al2023"
+  kubernetes_version = "1.33"
+
+  # EKS Addons
+  addons = {
+    coredns = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      before_compute = true
+    }
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  eks_managed_node_groups = {
+    example = {
+      # Starting with Kubernetes 1.30, AL2023 is the default AMI type for EKS managed node groups
+      instance_types = ["m6i.large"]
+      ami_type       = "AL2023_x86_64_STANDARD"
+
+      min_size = 2
+      max_size = 5
+      # This value is ignored after the initial creation
+      # https://github.com/bryantbiggs/eks-desired-size-hack
+      desired_size = 2
+
+      # This is not required - demonstrates how to pass additional configuration to nodeadm
+      # Ref https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              kubelet:
+                config:
+                  shutdownGracePeriod: 30s
+          EOT
+        }
+      ]
+    }
+  }
+
+  tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/eks-bottlerocket.tf b/examples/eks-managed-node-group/eks-bottlerocket.tf
new file mode 100644
index 0000000000..a4a9928d0f
--- /dev/null
+++ b/examples/eks-managed-node-group/eks-bottlerocket.tf
@@ -0,0 +1,56 @@
+module "eks_bottlerocket" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "${local.name}-bottlerocket"
+  kubernetes_version = "1.33"
+
+  # EKS Addons
+  addons = {
+    coredns = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      before_compute = true
+    }
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  eks_managed_node_groups = {
+    example = {
+      ami_type       = "BOTTLEROCKET_x86_64"
+      instance_types = ["m6i.large"]
+
+      min_size = 2
+      max_size = 5
+      # This value is ignored after the initial creation
+      # https://github.com/bryantbiggs/eks-desired-size-hack
+      desired_size = 2
+
+      # This is not required - demonstrates how to pass additional configuration
+      # Ref https://bottlerocket.dev/en/os/1.19.x/api/settings/
+      bootstrap_extra_args = <<-EOT
+        # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, and is disabled explicitly here.
+        [settings.host-containers.admin]
+        enabled = false
+
+        # The control host container provides out-of-band access via SSM.
+        # It is enabled by default, and can be disabled if you do not expect to use SSM.
+        # This could leave you with no way to access the API and change settings on an existing node!
+        [settings.host-containers.control]
+        enabled = true
+
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+      EOT
+    }
+  }
+
+  tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/main.tf b/examples/eks-managed-node-group/main.tf
new file mode 100644
index 0000000000..3adeaeb4c0
--- /dev/null
+++ b/examples/eks-managed-node-group/main.tf
@@ -0,0 +1,55 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name   = "ex-eks-mng"
+  region = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Example    = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# VPC
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/outputs.tf b/examples/eks-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/eks-managed-node-group/variables.tf b/examples/eks-managed-node-group/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/eks-managed-node-group/versions.tf b/examples/eks-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/examples/eks-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf
deleted file mode 100644
index 0697a58c6b..0000000000
--- a/examples/fargate/main.tf
+++ /dev/null
@@ -1,107 +0,0 @@
-terraform {
-  required_version = ">= 0.12.6"
-}
-
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc"
-  cidr                 = "172.16.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
-  public_subnets       = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = "1"
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/internal-elb"             = "1"
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
-
-  tags = {
-    Environment = "test"
-    GithubRepo  = "terraform-aws-eks"
-    GithubOrg   = "terraform-aws-modules"
-  }
-
-  vpc_id = module.vpc.vpc_id
-
-  fargate_profiles = {
-    default = {
-      name = "default"
-      selectors = [
-        {
-          namespace = "kube-system"
-          labels = {
-            k8s-app = "kube-dns"
-          }
-        },
-        {
-          namespace = "default"
-          # Kubernetes labels for selection
-          # labels = {
-          #   Environment = "test"
-          #   GithubRepo  = "terraform-aws-eks"
-          #   GithubOrg   = "terraform-aws-modules"
-          # }
-        }
-      ]
-
-      # using specific subnets instead of all the ones configured in eks
-      # subnets = ["subnet-0ca3e3d1234a56c78"]
-
-      tags = {
-        Owner = "test"
-      }
-    }
-  }
-
-  map_roles    = var.map_roles
-  map_users    = var.map_users
-  map_accounts = var.map_accounts
-}
diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf
deleted file mode 100644
index 59aa57a2c9..0000000000
--- a/examples/fargate/outputs.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
-output "fargate_profile_arns" {
-  description = "Outputs from node groups"
-  value       = module.eks.fargate_profile_arns
-}
diff --git a/examples/fargate/variables.tf b/examples/fargate/variables.tf
deleted file mode 100644
index 7085aeabd4..0000000000
--- a/examples/fargate/variables.tf
+++ /dev/null
@@ -1,52 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap."
-  type        = list(string)
-
-  default = [
-    "777777777777",
-    "888888888888",
-  ]
-}
-
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      rolearn  = "arn:aws:iam::66666666666:role/role1"
-      username = "role1"
-      groups   = ["system:masters"]
-    },
-  ]
-}
-
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap."
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-}
diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/fargate/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf
deleted file mode 100644
index 1883ecc70c..0000000000
--- a/examples/instance_refresh/main.tf
+++ /dev/null
@@ -1,260 +0,0 @@
-# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup
-
-provider "aws" {
-  region = var.region
-}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-provider "helm" {
-  kubernetes {
-    host                   = data.aws_eks_cluster.cluster.endpoint
-    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-    token                  = data.aws_eks_cluster_auth.cluster.token
-  }
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-refresh-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 3.0.0"
-
-  name                 = local.cluster_name
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-  enable_dns_hostnames = true
-}
-
-data "aws_iam_policy_document" "aws_node_termination_handler" {
-  statement {
-    effect = "Allow"
-    actions = [
-      "ec2:DescribeInstances",
-      "autoscaling:DescribeAutoScalingInstances",
-      "autoscaling:DescribeTags",
-    ]
-    resources = [
-      "*",
-    ]
-  }
-  statement {
-    effect = "Allow"
-    actions = [
-      "autoscaling:CompleteLifecycleAction",
-    ]
-    resources = module.eks.workers_asg_arns
-  }
-  statement {
-    effect = "Allow"
-    actions = [
-      "sqs:DeleteMessage",
-      "sqs:ReceiveMessage"
-    ]
-    resources = [
-      module.aws_node_termination_handler_sqs.sqs_queue_arn
-    ]
-  }
-}
-
-resource "aws_iam_policy" "aws_node_termination_handler" {
-  name   = "${local.cluster_name}-aws-node-termination-handler"
-  policy = data.aws_iam_policy_document.aws_node_termination_handler.json
-}
-
-data "aws_iam_policy_document" "aws_node_termination_handler_events" {
-  statement {
-    effect = "Allow"
-    principals {
-      type = "Service"
-      identifiers = [
-        "events.amazonaws.com",
-        "sqs.amazonaws.com",
-      ]
-    }
-    actions = [
-      "sqs:SendMessage",
-    ]
-    resources = [
-      "arn:aws:sqs:${var.region}:${data.aws_caller_identity.current.account_id}:${local.cluster_name}",
-    ]
-  }
-}
-
-module "aws_node_termination_handler_sqs" {
-  source                    = "terraform-aws-modules/sqs/aws"
-  version                   = "~> 3.0.0"
-  name                      = local.cluster_name
-  message_retention_seconds = 300
-  policy                    = data.aws_iam_policy_document.aws_node_termination_handler_events.json
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
-  name        = "${local.cluster_name}-asg-termination"
-  description = "Node termination event rule"
-  event_pattern = jsonencode(
-    {
-      "source" : [
-        "aws.autoscaling"
-      ],
-      "detail-type" : [
-        "EC2 Instance-terminate Lifecycle Action"
-      ]
-      "resources" : module.eks.workers_asg_arns
-    }
-  )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
-  target_id = "${local.cluster_name}-asg-termination"
-  rule      = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
-  arn       = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
-  name        = "${local.cluster_name}-spot-termination"
-  description = "Node termination event rule"
-  event_pattern = jsonencode(
-    {
-      "source" : [
-        "aws.ec2"
-      ],
-      "detail-type" : [
-        "EC2 Spot Instance Interruption Warning"
-      ]
-      "resources" : module.eks.workers_asg_arns
-    }
-  )
-}
-
-resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
-  target_id = "${local.cluster_name}-spot-termination"
-  rule      = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
-  arn       = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-module "aws_node_termination_handler_role" {
-  source                        = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
-  version                       = "4.1.0"
-  create_role                   = true
-  role_description              = "IRSA role for ANTH, cluster ${local.cluster_name}"
-  role_name_prefix              = local.cluster_name
-  provider_url                  = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
-  role_policy_arns              = [aws_iam_policy.aws_node_termination_handler.arn]
-  oidc_fully_qualified_subjects = ["system:serviceaccount:${var.namespace}:${var.serviceaccount}"]
-}
-
-resource "helm_release" "aws_node_termination_handler" {
-  depends_on = [
-    module.eks
-  ]
-
-  name             = "aws-node-termination-handler"
-  namespace        = var.namespace
-  repository       = "https://aws.github.io/eks-charts"
-  chart            = "aws-node-termination-handler"
-  version          = var.aws_node_termination_handler_chart_version
-  create_namespace = true
-
-  set {
-    name  = "awsRegion"
-    value = var.region
-  }
-  set {
-    name  = "serviceAccount.name"
-    value = var.serviceaccount
-  }
-  set {
-    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
-    value = module.aws_node_termination_handler_role.iam_role_arn
-    type  = "string"
-  }
-  set {
-    name  = "enableSqsTerminationDraining"
-    value = "true"
-  }
-  set {
-    name  = "enableSpotInterruptionDraining"
-    value = "true"
-  }
-  set {
-    name  = "queueURL"
-    value = module.aws_node_termination_handler_sqs.sqs_queue_id
-  }
-  set {
-    name  = "logLevel"
-    value = "debug"
-  }
-}
-
-# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
-# ensures that node termination does not require the lifecycle action to be completed,
-# and thus allows the ASG to be destroyed cleanly.
-resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
-  count                  = length(module.eks.workers_asg_names)
-  name                   = "aws-node-termination-handler"
-  autoscaling_group_name = module.eks.workers_asg_names[count.index]
-  lifecycle_transition   = "autoscaling:EC2_INSTANCE_TERMINATING"
-  heartbeat_timeout      = 300
-  default_result         = "CONTINUE"
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.public_subnets
-  vpc_id          = module.vpc.vpc_id
-  enable_irsa     = true
-  worker_groups_launch_template = [
-    {
-      name                                 = "refresh"
-      asg_max_size                         = 2
-      asg_desired_capacity                 = 2
-      instance_refresh_enabled             = true
-      instance_refresh_instance_warmup     = 60
-      public_ip                            = true
-      metadata_http_put_response_hop_limit = 3
-      update_default_version               = true
-      instance_refresh_triggers            = ["tag"]
-      tags = [
-        {
-          key                 = "aws-node-termination-handler/managed"
-          value               = ""
-          propagate_at_launch = true
-        },
-        {
-          key                 = "foo"
-          value               = "buzz"
-          propagate_at_launch = true
-        }
-      ]
-    }
-  ]
-}
diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf
deleted file mode 100644
index 3b981f1f1b..0000000000
--- a/examples/instance_refresh/outputs.tf
+++ /dev/null
@@ -1,34 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
-output "sqs_queue_asg_notification_arn" {
-  description = "SQS queue ASG notification ARN"
-  value       = module.aws_node_termination_handler_sqs.sqs_queue_arn
-}
-
-output "sqs_queue_asg_notification_url" {
-  description = "SQS queue ASG notification URL"
-  value       = module.aws_node_termination_handler_sqs.sqs_queue_id
-}
diff --git a/examples/instance_refresh/variables.tf b/examples/instance_refresh/variables.tf
deleted file mode 100644
index 96fc26df92..0000000000
--- a/examples/instance_refresh/variables.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
-variable "aws_node_termination_handler_chart_version" {
-  description = "Version of the aws-node-termination-handler Helm chart to install."
-  default     = "0.15.0"
-}
-
-variable "namespace" {
-  description = "Namespace for the aws-node-termination-handler."
-  default     = "kube-system"
-}
-
-variable "serviceaccount" {
-  description = "Serviceaccount for the aws-node-termination-handler."
-  default     = "aws-node-termination-handler"
-}
diff --git a/examples/instance_refresh/versions.tf b/examples/instance_refresh/versions.tf
deleted file mode 100644
index 67281c8d51..0000000000
--- a/examples/instance_refresh/versions.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-    helm       = "~> 2.1.2"
-  }
-}
diff --git a/examples/irsa/README.md b/examples/irsa/README.md
deleted file mode 100644
index 53dc500dab..0000000000
--- a/examples/irsa/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# IAM Roles for Service Accounts
-
-This example shows how to create an IAM role to be used for a Kubernetes `ServiceAccount`. It will create a policy and role to be used by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) using the [public Helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart).
-
-The AWS documentation for IRSA is here: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html
-
-## Setup
-
-Run Terraform:
-
-```
-terraform init
-terraform apply
-```
-
-Set kubectl context to the new cluster: `export KUBECONFIG=kubeconfig_test-eks-irsa`
-
-Check that there is a node that is `Ready`:
-
-```
-$ kubectl get nodes
-NAME                                       STATUS   ROLES    AGE     VERSION
-ip-10-0-2-190.us-west-2.compute.internal   Ready    <none>   6m39s   v1.14.8-eks-b8860f
-```
-
-Replace `<ACCOUNT ID>` with your AWS account ID in `cluster-autoscaler-chart-values.yaml`. There is output from terraform for this.
-
-Install the chart using the provided values file:
-
-```
-$ helm repo add autoscaler https://kubernetes.github.io/autoscaler
-$ helm repo update
-$ helm install cluster-autoscaler --namespace kube-system autoscaler/cluster-autoscaler --values cluster-autoscaler-chart-values.yaml
-```
-
-## Verify
-
-Ensure the cluster-autoscaler pod is running:
-
-```
-$ kubectl --namespace=kube-system get pods -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart"
-NAME                                                              READY   STATUS    RESTARTS   AGE
-cluster-autoscaler-aws-cluster-autoscaler-chart-5545d4b97-9ztpm   1/1     Running   0          3m
-```
-
-Observe the `AWS_*` environment variables that were added to the pod automatically by EKS:
-
-```
-kubectl --namespace=kube-system get pods -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart" -o yaml | grep -A3 AWS_ROLE_ARN
-
-- name: AWS_ROLE_ARN
-  value: arn:aws:iam::xxxxxxxxx:role/cluster-autoscaler
-- name: AWS_WEB_IDENTITY_TOKEN_FILE
-  value: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
-```
-
-Verify it is working by checking the logs, you should see that it has discovered the autoscaling group successfully:
-
-```
-kubectl --namespace=kube-system logs -l "app.kubernetes.io/name=aws-cluster-autoscaler-chart"
-
-I0128 14:59:00.901513       1 auto_scaling_groups.go:354] Regenerating instance to ASG map for ASGs: [test-eks-irsa-worker-group-12020012814125354700000000e]
-I0128 14:59:00.969875       1 auto_scaling_groups.go:138] Registering ASG test-eks-irsa-worker-group-12020012814125354700000000e
-I0128 14:59:00.969906       1 aws_manager.go:263] Refreshed ASG list, next refresh after 2020-01-28 15:00:00.969901767 +0000 UTC m=+61.310501783
-```
diff --git a/examples/irsa/cluster-autoscaler-chart-values.yaml b/examples/irsa/cluster-autoscaler-chart-values.yaml
deleted file mode 100644
index ccde4365a5..0000000000
--- a/examples/irsa/cluster-autoscaler-chart-values.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-awsRegion: us-west-2
-
-rbac:
-  create: true
-  serviceAccount:
-    # This value should match local.k8s_service_account_name in locals.tf
-    name: cluster-autoscaler-aws-cluster-autoscaler-chart
-    annotations:
-      # This value should match the ARN of the role created by module.iam_assumable_role_admin in irsa.tf
-      eks.amazonaws.com/role-arn: "arn:aws:iam::<ACCOUNT ID>:role/cluster-autoscaler"
-
-autoDiscovery:
-  clusterName: test-eks-irsa
-  enabled: true
diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf
deleted file mode 100644
index 1775e33ecb..0000000000
--- a/examples/irsa/irsa.tf
+++ /dev/null
@@ -1,57 +0,0 @@
-module "iam_assumable_role_admin" {
-  source                        = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
-  version                       = "3.6.0"
-  create_role                   = true
-  role_name                     = "cluster-autoscaler"
-  provider_url                  = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
-  role_policy_arns              = [aws_iam_policy.cluster_autoscaler.arn]
-  oidc_fully_qualified_subjects = ["system:serviceaccount:${local.k8s_service_account_namespace}:${local.k8s_service_account_name}"]
-}
-
-resource "aws_iam_policy" "cluster_autoscaler" {
-  name_prefix = "cluster-autoscaler"
-  description = "EKS cluster-autoscaler policy for cluster ${module.eks.cluster_id}"
-  policy      = data.aws_iam_policy_document.cluster_autoscaler.json
-}
-
-data "aws_iam_policy_document" "cluster_autoscaler" {
-  statement {
-    sid    = "clusterAutoscalerAll"
-    effect = "Allow"
-
-    actions = [
-      "autoscaling:DescribeAutoScalingGroups",
-      "autoscaling:DescribeAutoScalingInstances",
-      "autoscaling:DescribeLaunchConfigurations",
-      "autoscaling:DescribeTags",
-      "ec2:DescribeLaunchTemplateVersions",
-    ]
-
-    resources = ["*"]
-  }
-
-  statement {
-    sid    = "clusterAutoscalerOwn"
-    effect = "Allow"
-
-    actions = [
-      "autoscaling:SetDesiredCapacity",
-      "autoscaling:TerminateInstanceInAutoScalingGroup",
-      "autoscaling:UpdateAutoScalingGroup",
-    ]
-
-    resources = ["*"]
-
-    condition {
-      test     = "StringEquals"
-      variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.eks.cluster_id}"
-      values   = ["owned"]
-    }
-
-    condition {
-      test     = "StringEquals"
-      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
-      values   = ["true"]
-    }
-  }
-}
diff --git a/examples/irsa/locals.tf b/examples/irsa/locals.tf
deleted file mode 100644
index a0e5da0c28..0000000000
--- a/examples/irsa/locals.tf
+++ /dev/null
@@ -1,5 +0,0 @@
-locals {
-  cluster_name                  = "test-eks-irsa"
-  k8s_service_account_namespace = "kube-system"
-  k8s_service_account_name      = "cluster-autoscaler-aws-cluster-autoscaler-chart"
-}
diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf
deleted file mode 100644
index e6c9fa4018..0000000000
--- a/examples/irsa/main.tf
+++ /dev/null
@@ -1,66 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {}
-
-data "aws_caller_identity" "current" {}
-
-module "vpc" {
-  source               = "terraform-aws-modules/vpc/aws"
-  version              = "2.64.0"
-  name                 = "test-vpc"
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  public_subnets       = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
-  enable_dns_hostnames = true
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = "1"
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.public_subnets
-  vpc_id          = module.vpc.vpc_id
-  enable_irsa     = true
-
-  worker_groups = [
-    {
-      name                 = "worker-group-1"
-      instance_type        = "t3.medium"
-      asg_desired_capacity = 1
-      tags = [
-        {
-          "key"                 = "k8s.io/cluster-autoscaler/enabled"
-          "propagate_at_launch" = "false"
-          "value"               = "true"
-        },
-        {
-          "key"                 = "k8s.io/cluster-autoscaler/${local.cluster_name}"
-          "propagate_at_launch" = "false"
-          "value"               = "owned"
-        }
-      ]
-    }
-  ]
-}
diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf
deleted file mode 100644
index ef2ab9577a..0000000000
--- a/examples/irsa/outputs.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-output "aws_account_id" {
-  value = data.aws_caller_identity.current.account_id
-}
diff --git a/examples/irsa/variables.tf b/examples/irsa/variables.tf
deleted file mode 100644
index 81b8dbe73e..0000000000
--- a/examples/irsa/variables.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
diff --git a/examples/irsa/versions.tf b/examples/irsa/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/irsa/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md
new file mode 100644
index 0000000000..47cfbf3ae2
--- /dev/null
+++ b/examples/karpenter/README.md
@@ -0,0 +1,132 @@
+# Karpenter Example
+
+Configuration in this directory creates an AWS EKS cluster with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling. In the example provided, Karpenter is provisioned on top of an EKS Managed Node Group.
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Once the cluster is up and running, you can check that Karpenter is functioning as intended with the following commands:
+
+```bash
+# First, make sure you have updated your local kubeconfig
+aws eks --region eu-west-1 update-kubeconfig --name ex-karpenter
+
+# Second, deploy the Karpenter NodeClass/NodePool
+kubectl apply -f karpenter.yaml
+
+# Third, deploy the example deployment
+kubectl apply -f inflate.yaml
+
+# You can watch Karpenter's controller logs with
+kubectl logs -f -n kube-system -l app.kubernetes.io/name=karpenter -c controller
+```
+
+Validate that the Amazon EKS addon Pods are running in the managed node group and that the `inflate` application Pods are running on Karpenter-provisioned nodes.
+
+```bash
+kubectl get nodes -L karpenter.sh/registered
+```
+
+```text
+NAME                                        STATUS   ROLES    AGE   VERSION               REGISTERED
+ip-10-0-13-51.eu-west-1.compute.internal    Ready    <none>   29s   v1.31.1-eks-1b3e656   true
+ip-10-0-41-242.eu-west-1.compute.internal   Ready    <none>   35m   v1.31.1-eks-1b3e656
+ip-10-0-8-151.eu-west-1.compute.internal    Ready    <none>   35m   v1.31.1-eks-1b3e656
+```
+
+```sh
+kubectl get pods -A -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName
+```
+
+```text
+NAME                           NODE
+inflate-67cd5bb766-hvqfn       ip-10-0-13-51.eu-west-1.compute.internal
+inflate-67cd5bb766-jnsdp       ip-10-0-13-51.eu-west-1.compute.internal
+inflate-67cd5bb766-k4gwf       ip-10-0-41-242.eu-west-1.compute.internal
+inflate-67cd5bb766-m49f6       ip-10-0-13-51.eu-west-1.compute.internal
+inflate-67cd5bb766-pgzx9       ip-10-0-8-151.eu-west-1.compute.internal
+aws-node-58m4v                 ip-10-0-3-57.eu-west-1.compute.internal
+aws-node-pj2gc                 ip-10-0-8-151.eu-west-1.compute.internal
+aws-node-thffj                 ip-10-0-41-242.eu-west-1.compute.internal
+aws-node-vh66d                 ip-10-0-13-51.eu-west-1.compute.internal
+coredns-844dbb9f6f-9g9lg       ip-10-0-41-242.eu-west-1.compute.internal
+coredns-844dbb9f6f-fmzfq       ip-10-0-41-242.eu-west-1.compute.internal
+eks-pod-identity-agent-jr2ns   ip-10-0-8-151.eu-west-1.compute.internal
+eks-pod-identity-agent-mpjkq   ip-10-0-13-51.eu-west-1.compute.internal
+eks-pod-identity-agent-q4tjc   ip-10-0-3-57.eu-west-1.compute.internal
+eks-pod-identity-agent-zzfdj   ip-10-0-41-242.eu-west-1.compute.internal
+karpenter-5b8965dc9b-rx9bx     ip-10-0-8-151.eu-west-1.compute.internal
+karpenter-5b8965dc9b-xrfnx     ip-10-0-41-242.eu-west-1.compute.internal
+kube-proxy-2xf42               ip-10-0-41-242.eu-west-1.compute.internal
+kube-proxy-kbfc8               ip-10-0-8-151.eu-west-1.compute.internal
+kube-proxy-kt8zn               ip-10-0-13-51.eu-west-1.compute.internal
+kube-proxy-sl6bz               ip-10-0-3-57.eu-west-1.compute.internal
+```
+
+### Tear Down & Clean-Up
+
+Because Karpenter manages node resources outside of Terraform, resources created by Karpenter must be de-provisioned before the remaining resources are removed with Terraform.
+
+1. Remove the example deployment created above and any nodes created by Karpenter
+
+```bash
+kubectl delete deployment inflate
+```
+
+2. Remove the resources created by Terraform
+
+```bash
+terraform destroy --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 3.0.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+| <a name="provider_aws.virginia"></a> [aws.virginia](#provider\_aws.virginia) | >= 6.0 |
+| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 3.0.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_karpenter"></a> [karpenter](#module\_karpenter) | ../../modules/karpenter | n/a |
+| <a name="module_karpenter_disabled"></a> [karpenter\_disabled](#module\_karpenter\_disabled) | ../../modules/karpenter | n/a |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [helm_release.karpenter](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+No outputs.
+<!-- END_TF_DOCS -->
diff --git a/examples/karpenter/inflate.yaml b/examples/karpenter/inflate.yaml
new file mode 100644
index 0000000000..b70cfd8509
--- /dev/null
+++ b/examples/karpenter/inflate.yaml
@@ -0,0 +1,21 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
+          resources:
+            requests:
+              cpu: 1
diff --git a/examples/karpenter/karpenter.yaml b/examples/karpenter/karpenter.yaml
new file mode 100644
index 0000000000..3d3107c06f
--- /dev/null
+++ b/examples/karpenter/karpenter.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+metadata:
+  name: default
+spec:
+  amiSelectorTerms:
+    - alias: bottlerocket@latest
+  role: ex-karpenter
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter
+  tags:
+    karpenter.sh/discovery: ex-karpenter
+---
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: default
+      requirements:
+        - key: "karpenter.k8s.aws/instance-category"
+          operator: In
+          values: ["c", "m", "r"]
+        - key: "karpenter.k8s.aws/instance-cpu"
+          operator: In
+          values: ["4", "8", "16", "32"]
+        - key: "karpenter.k8s.aws/instance-hypervisor"
+          operator: In
+          values: ["nitro"]
+        - key: "karpenter.k8s.aws/instance-generation"
+          operator: Gt
+          values: ["2"]
+  limits:
+    cpu: 1000
+  disruption:
+    consolidationPolicy: WhenEmpty
+    consolidateAfter: 30s
diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf
new file mode 100644
index 0000000000..6f652327c3
--- /dev/null
+++ b/examples/karpenter/main.tf
@@ -0,0 +1,194 @@
+provider "aws" {
+  region = local.region
+}
+
+provider "aws" {
+  region = "us-east-1"
+  alias  = "virginia"
+}
+
+provider "helm" {
+  kubernetes = {
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+    exec = {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      # This requires the awscli to be installed locally where Terraform is executed
+      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
+    }
+  }
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+data "aws_ecrpublic_authorization_token" "token" {
+  provider = aws.virginia
+}
+
+locals {
+  name   = "ex-${basename(path.cwd)}"
+  region = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Example    = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name               = local.name
+  kubernetes_version = "1.33"
+
+  # Gives Terraform identity admin access to cluster which will
+  # allow deploying resources (Karpenter) into the cluster
+  enable_cluster_creator_admin_permissions = true
+  endpoint_public_access                   = true
+
+  addons = {
+    coredns = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      before_compute = true
+    }
+  }
+
+  vpc_id                   = module.vpc.vpc_id
+  subnet_ids               = module.vpc.private_subnets
+  control_plane_subnet_ids = module.vpc.intra_subnets
+
+  eks_managed_node_groups = {
+    karpenter = {
+      ami_type       = "BOTTLEROCKET_x86_64"
+      instance_types = ["m5.large"]
+
+      min_size     = 2
+      max_size     = 3
+      desired_size = 2
+
+      labels = {
+        # Used to ensure Karpenter runs on nodes that it does not manage
+        "karpenter.sh/controller" = "true"
+      }
+    }
+  }
+
+  node_security_group_tags = merge(local.tags, {
+    # NOTE - if creating multiple security groups with this module, only tag the
+    # security group that Karpenter should utilize with the following tag
+    # (i.e. - at most, only one security group should have this tag in your account)
+    "karpenter.sh/discovery" = local.name
+  })
+
+  tags = local.tags
+}
+
+################################################################################
+# Karpenter
+################################################################################
+
+module "karpenter" {
+  source = "../../modules/karpenter"
+
+  cluster_name = module.eks.cluster_name
+
+  # Name needs to match role name passed to the EC2NodeClass
+  node_iam_role_use_name_prefix   = false
+  node_iam_role_name              = local.name
+  create_pod_identity_association = true
+
+  # Used to attach additional IAM policies to the Karpenter node IAM role
+  node_iam_role_additional_policies = {
+    AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+  }
+
+  tags = local.tags
+}
+
+module "karpenter_disabled" {
+  source = "../../modules/karpenter"
+
+  create = false
+}
+
+################################################################################
+# Karpenter Helm chart & manifests
+# Not required; just to demonstrate functionality of the sub-module
+################################################################################
+
+resource "helm_release" "karpenter" {
+  namespace           = "kube-system"
+  name                = "karpenter"
+  repository          = "oci://public.ecr.aws/karpenter"
+  repository_username = data.aws_ecrpublic_authorization_token.token.user_name
+  repository_password = data.aws_ecrpublic_authorization_token.token.password
+  chart               = "karpenter"
+  version             = "1.6.0"
+  wait                = false
+
+  values = [
+    <<-EOT
+    nodeSelector:
+      karpenter.sh/controller: 'true'
+    dnsPolicy: Default
+    settings:
+      clusterName: ${module.eks.cluster_name}
+      clusterEndpoint: ${module.eks.cluster_endpoint}
+      interruptionQueue: ${module.karpenter.queue_name}
+    webhook:
+      enabled: false
+    EOT
+  ]
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+    # Tags subnets for Karpenter auto-discovery
+    "karpenter.sh/discovery" = local.name
+  }
+
+  tags = local.tags
+}
diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/karpenter/variables.tf b/examples/karpenter/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
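> Note: the example's `outputs.tf` and `variables.tf` are added as empty files, which is why the README's generated docs report "No inputs" and "No outputs". Purely as an illustration (not part of this change), an `outputs.tf` along the following lines could surface the values the README's `aws eks update-kubeconfig` and Karpenter verification steps rely on; the attribute references below are the ones already used in `main.tf`, but the output names themselves are hypothetical.

```hcl
# Hypothetical outputs for illustration only; the committed example defines none.
output "cluster_name" {
  description = "EKS cluster name, usable with `aws eks update-kubeconfig --name <cluster_name>`"
  value       = module.eks.cluster_name
}

output "karpenter_queue_name" {
  description = "Interruption queue name passed to the Karpenter Helm chart settings"
  value       = module.karpenter.queue_name
}
```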
diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf
new file mode 100644
index 0000000000..4e81032c58
--- /dev/null
+++ b/examples/karpenter/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 3.0.0"
+    }
+  }
+}
diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
deleted file mode 100644
index fe5df294ce..0000000000
--- a/examples/launch_templates/main.tf
+++ /dev/null
@@ -1,71 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-lt-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc-lt"
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-  enable_dns_hostnames = true
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.public_subnets
-  vpc_id          = module.vpc.vpc_id
-
-  worker_groups_launch_template = [
-    {
-      name                 = "worker-group-1"
-      instance_type        = "t3.small"
-      asg_desired_capacity = 2
-      public_ip            = true
-    },
-    {
-      name                 = "worker-group-2"
-      instance_type        = "t3.medium"
-      asg_desired_capacity = 1
-      public_ip            = true
-    },
-    {
-      name                          = "worker-group-3"
-      instance_type                 = "t2.large"
-      asg_desired_capacity          = 1
-      public_ip                     = true
-      elastic_inference_accelerator = "eia2.medium"
-    },
-  ]
-}
diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf
deleted file mode 100644
index a0788aff1d..0000000000
--- a/examples/launch_templates/outputs.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
diff --git a/examples/launch_templates/pre_userdata.sh b/examples/launch_templates/pre_userdata.sh
deleted file mode 100644
index 52dd50f28c..0000000000
--- a/examples/launch_templates/pre_userdata.sh
+++ /dev/null
@@ -1 +0,0 @@
-yum update -y
diff --git a/examples/launch_templates/variables.tf b/examples/launch_templates/variables.tf
deleted file mode 100644
index f69e50026b..0000000000
--- a/examples/launch_templates/variables.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
diff --git a/examples/launch_templates/versions.tf b/examples/launch_templates/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/launch_templates/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf b/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf
deleted file mode 100644
index 0f51fb1fde..0000000000
--- a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf
+++ /dev/null
@@ -1,77 +0,0 @@
-# if you have used ASGs before, that role got auto-created already and you need to import to TF state
-resource "aws_iam_service_linked_role" "autoscaling" {
-  aws_service_name = "autoscaling.amazonaws.com"
-  description      = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
-}
-
-data "aws_caller_identity" "current" {}
-
-# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
-data "aws_iam_policy_document" "ebs_decryption" {
-  # Copy of default KMS policy that lets you manage it
-  statement {
-    sid    = "Enable IAM User Permissions"
-    effect = "Allow"
-
-    principals {
-      type        = "AWS"
-      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
-    }
-
-    actions = [
-      "kms:*"
-    ]
-
-    resources = ["*"]
-  }
-
-  # Required for EKS
-  statement {
-    sid    = "Allow service-linked role use of the CMK"
-    effect = "Allow"
-
-    principals {
-      type = "AWS"
-      identifiers = [
-        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
-        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
-      ]
-    }
-
-    actions = [
-      "kms:Encrypt",
-      "kms:Decrypt",
-      "kms:ReEncrypt*",
-      "kms:GenerateDataKey*",
-      "kms:DescribeKey"
-    ]
-
-    resources = ["*"]
-  }
-
-  statement {
-    sid    = "Allow attachment of persistent resources"
-    effect = "Allow"
-
-    principals {
-      type = "AWS"
-      identifiers = [
-        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
-        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
-      ]
-    }
-
-    actions = [
-      "kms:CreateGrant"
-    ]
-
-    resources = ["*"]
-
-    condition {
-      test     = "Bool"
-      variable = "kms:GrantIsForAWSResource"
-      values   = ["true"]
-    }
-
-  }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf
deleted file mode 100644
index e66bad1a9f..0000000000
--- a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf
+++ /dev/null
@@ -1,91 +0,0 @@
-data "template_file" "launch_template_userdata" {
-  template = file("${path.module}/templates/userdata.sh.tpl")
-
-  vars = {
-    cluster_name        = local.cluster_name
-    endpoint            = module.eks.cluster_endpoint
-    cluster_auth_base64 = module.eks.cluster_certificate_authority_data
-
-    bootstrap_extra_args = ""
-    kubelet_extra_args   = ""
-  }
-}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# there are several more options one could set but you probably dont need to modify them
-# you can take the default and add your custom AMI and/or custom tags
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
-# then the default user-data for bootstrapping a cluster is merged in the copy.
-resource "aws_launch_template" "default" {
-  name_prefix            = "eks-example-"
-  description            = "Default Launch-Template"
-  update_default_version = true
-
-  block_device_mappings {
-    device_name = "/dev/xvda"
-
-    ebs {
-      volume_size           = 100
-      volume_type           = "gp2"
-      delete_on_termination = true
-      # encrypted             = true
-
-      # Enable this if you want to encrypt your node root volumes with a KMS/CMK. encryption of PVCs is handled via k8s StorageClass tho
-      # you also need to attach data.aws_iam_policy_document.ebs_decryption.json from the disk_encryption_policy.tf to the KMS/CMK key then !!
-      # kms_key_id            = var.kms_key_arn
-    }
-  }
-
-  instance_type = var.instance_type
-
-  monitoring {
-    enabled = true
-  }
-
-  network_interfaces {
-    associate_public_ip_address = false
-    delete_on_termination       = true
-    security_groups             = [module.eks.worker_security_group_id]
-  }
-
-  # if you want to use a custom AMI
-  # image_id      = var.ami_id
-
-  # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
-  # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
-  #
-  # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
-  # user_data = base64encode(
-  #   data.template_file.launch_template_userdata.rendered,
-  # )
-
-
-  # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
-  tag_specifications {
-    resource_type = "instance"
-
-    tags = {
-      CustomTag = "EKS example"
-    }
-  }
-
-  # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
-  tag_specifications {
-    resource_type = "volume"
-
-    tags = {
-      CustomTag = "EKS example"
-    }
-  }
-
-  # Tag the LT itself
-  tags = {
-    CustomTag = "EKS example"
-  }
-
-  lifecycle {
-    create_before_destroy = true
-  }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/launch_templates_with_managed_node_groups/main.tf
deleted file mode 100644
index 8d99dcfee5..0000000000
--- a/examples/launch_templates_with_managed_node_groups/main.tf
+++ /dev/null
@@ -1,71 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-lt-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc"
-  cidr                 = "172.16.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
-  public_subnets       = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared" # EKS adds this and TF would want to remove then later
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
-  vpc_id          = module.vpc.vpc_id
-
-  node_groups = {
-    example = {
-      desired_capacity = 1
-      max_capacity     = 15
-      min_capacity     = 1
-
-      launch_template_id      = aws_launch_template.default.id
-      launch_template_version = aws_launch_template.default.default_version
-
-      additional_tags = {
-        CustomTag = "EKS example"
-      }
-    }
-  }
-}
diff --git a/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl b/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl
deleted file mode 100644
index 41eeb0ba03..0000000000
--- a/examples/launch_templates_with_managed_node_groups/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,12 +0,0 @@
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-#!/bin/bash
-set -e
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
-
---//--
diff --git a/examples/launch_templates_with_managed_node_groups/variables.tf b/examples/launch_templates_with_managed_node_groups/variables.tf
deleted file mode 100644
index 6dcb269f4c..0000000000
--- a/examples/launch_templates_with_managed_node_groups/variables.tf
+++ /dev/null
@@ -1,15 +0,0 @@
-variable "region" {
-  default = "eu-central-1"
-}
-
-variable "instance_type" {
-  # Smallest recommended, where ~1.1Gb of 2Gb memory is available for the Kubernetes pods after ‘warming up’ Docker, Kubelet, and OS
-  default = "t3.small"
-  type    = string
-}
-
-variable "kms_key_arn" {
-  default     = ""
-  description = "KMS key ARN to use if you want to encrypt EKS node root volumes"
-  type        = string
-}
diff --git a/examples/launch_templates_with_managed_node_groups/versions.tf b/examples/launch_templates_with_managed_node_groups/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/launch_templates_with_managed_node_groups/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf
deleted file mode 100644
index 4c8b3fed6c..0000000000
--- a/examples/managed_node_groups/main.tf
+++ /dev/null
@@ -1,119 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc"
-  cidr                 = "172.16.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
-  public_subnets       = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = "1"
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/internal-elb"             = "1"
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
-
-  tags = {
-    Environment = "test"
-    GithubRepo  = "terraform-aws-eks"
-    GithubOrg   = "terraform-aws-modules"
-  }
-
-  vpc_id = module.vpc.vpc_id
-
-  node_groups_defaults = {
-    ami_type  = "AL2_x86_64"
-    disk_size = 50
-  }
-
-  node_groups = {
-    example = {
-      desired_capacity = 1
-      max_capacity     = 10
-      min_capacity     = 1
-
-      instance_types = ["t3.large"]
-      capacity_type  = "SPOT"
-      k8s_labels = {
-        Environment = "test"
-        GithubRepo  = "terraform-aws-eks"
-        GithubOrg   = "terraform-aws-modules"
-      }
-      additional_tags = {
-        ExtraTag = "example"
-      }
-      taints = [
-        {
-          key    = "dedicated"
-          value  = "gpuGroup"
-          effect = "NO_SCHEDULE"
-        }
-      ]
-    }
-  }
-
-  # Create security group rules to allow communication between pods on workers and pods in managed node groups.
-  # Set this to true if you have AWS-Managed node groups and Self-Managed worker groups.
-  # See https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1089
-
-  # worker_create_cluster_primary_security_group_rules = true
-
-  # worker_groups_launch_template = [
-  #   {
-  #     name                 = "worker-group-1"
-  #     instance_type        = "t3.small"
-  #     asg_desired_capacity = 2
-  #     public_ip            = true
-  #   }
-  # ]
-
-  map_roles    = var.map_roles
-  map_users    = var.map_users
-  map_accounts = var.map_accounts
-}
diff --git a/examples/managed_node_groups/outputs.tf b/examples/managed_node_groups/outputs.tf
deleted file mode 100644
index 7010db294f..0000000000
--- a/examples/managed_node_groups/outputs.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
-output "node_groups" {
-  description = "Outputs from node groups"
-  value       = module.eks.node_groups
-}
diff --git a/examples/managed_node_groups/variables.tf b/examples/managed_node_groups/variables.tf
deleted file mode 100644
index 7085aeabd4..0000000000
--- a/examples/managed_node_groups/variables.tf
+++ /dev/null
@@ -1,52 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap."
-  type        = list(string)
-
-  default = [
-    "777777777777",
-    "888888888888",
-  ]
-}
-
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      rolearn  = "arn:aws:iam::66666666666:role/role1"
-      username = "role1"
-      groups   = ["system:masters"]
-    },
-  ]
-}
-
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap."
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-}
diff --git a/examples/managed_node_groups/versions.tf b/examples/managed_node_groups/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/managed_node_groups/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf
deleted file mode 100644
index 9aebd4cbe5..0000000000
--- a/examples/secrets_encryption/main.tf
+++ /dev/null
@@ -1,93 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-resource "aws_kms_key" "eks" {
-  description = "EKS Secret Encryption Key"
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc"
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-  enable_nat_gateway   = true
-  single_nat_gateway   = true
-  enable_dns_hostnames = true
-
-  public_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                      = "1"
-  }
-
-  private_subnet_tags = {
-    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
-    "kubernetes.io/role/internal-elb"             = "1"
-  }
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
-
-  cluster_encryption_config = [
-    {
-      provider_key_arn = aws_kms_key.eks.arn
-      resources        = ["secrets"]
-    }
-  ]
-
-  tags = {
-    Environment = "test"
-    GithubRepo  = "terraform-aws-eks"
-    GithubOrg   = "terraform-aws-modules"
-  }
-
-  vpc_id = module.vpc.vpc_id
-
-  worker_groups = [
-    {
-      name                 = "worker-group-1"
-      instance_type        = "t3.small"
-      additional_userdata  = "echo foo bar"
-      asg_desired_capacity = 2
-    },
-  ]
-
-  map_roles    = var.map_roles
-  map_users    = var.map_users
-  map_accounts = var.map_accounts
-}
diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf
deleted file mode 100644
index 51ddb024a2..0000000000
--- a/examples/secrets_encryption/outputs.tf
+++ /dev/null
@@ -1,24 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
diff --git a/examples/secrets_encryption/variables.tf b/examples/secrets_encryption/variables.tf
deleted file mode 100644
index 7085aeabd4..0000000000
--- a/examples/secrets_encryption/variables.tf
+++ /dev/null
@@ -1,52 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap."
-  type        = list(string)
-
-  default = [
-    "777777777777",
-    "888888888888",
-  ]
-}
-
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      rolearn  = "arn:aws:iam::66666666666:role/role1"
-      username = "role1"
-      groups   = ["system:masters"]
-    },
-  ]
-}
-
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap."
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-}
diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/secrets_encryption/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/examples/self-managed-node-group/README.md b/examples/self-managed-node-group/README.md
new file mode 100644
index 0000000000..ce84716f8a
--- /dev/null
+++ b/examples/self-managed-node-group/README.md
@@ -0,0 +1,20 @@
+# Self-managed Node Group Examples
+
+Configuration in this directory creates Amazon EKS clusters with self-managed node groups demonstrating different configurations:
+
+- `eks-al2023.tf` demonstrates an EKS cluster using a self-managed node group that utilizes the Amazon Linux 2023 EKS optimized AMI
+- `eks-bottlerocket.tf` demonstrates an EKS cluster using a self-managed node group that utilizes the Bottlerocket EKS optimized AMI
+
+Each cluster configuration example is defined in its own file and is independent of the other cluster configurations.
+
+## Usage
+
+To provision the provided configurations, you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
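Because each example cluster lives in its own file and module block, a single configuration can also be planned or applied in isolation with Terraform's `-target` flag. A minimal sketch, using the `eks_al2023` module name from this directory (the same applies to `module.eks_bottlerocket`; shared dependencies such as the VPC are pulled in automatically):

```bash
$ terraform init
$ terraform plan -target=module.eks_al2023
$ terraform apply -target=module.eks_al2023 --auto-approve
```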
diff --git a/examples/self-managed-node-group/eks-al2023.tf b/examples/self-managed-node-group/eks-al2023.tf
new file mode 100644
index 0000000000..ca17ac67d9
--- /dev/null
+++ b/examples/self-managed-node-group/eks-al2023.tf
@@ -0,0 +1,54 @@
+module "eks_al2023" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "${local.name}-al2023"
+  kubernetes_version = "1.33"
+
+  # EKS Addons
+  addons = {
+    coredns = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      before_compute = true
+    }
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  self_managed_node_groups = {
+    example = {
+      ami_type      = "AL2023_x86_64_STANDARD"
+      instance_type = "m6i.large"
+
+      min_size = 2
+      max_size = 5
+      # This value is ignored after the initial creation
+      # https://github.com/bryantbiggs/eks-desired-size-hack
+      desired_size = 2
+
+      # This is not required - demonstrates how to pass additional configuration to nodeadm
+      # Ref https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              kubelet:
+                config:
+                  shutdownGracePeriod: 30s
+          EOT
+        }
+      ]
+    }
+  }
+
+  tags = local.tags
+}
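The `cloudinit_pre_nodeadm` hook shown above accepts any number of `NodeConfig` documents, so other nodeadm settings can be merged the same way. A minimal sketch that would additionally apply a node label (the label key/value are illustrative only, and `spec.kubelet.flags` is assumed from the nodeadm API documentation):

```hcl
# Placed inside the node group definition, e.g. self_managed_node_groups.example
cloudinit_pre_nodeadm = [
  {
    content_type = "application/node.eks.aws"
    content      = <<-EOT
      ---
      apiVersion: node.eks.aws/v1alpha1
      kind: NodeConfig
      spec:
        kubelet:
          flags:
            - --node-labels=workload=example
    EOT
  }
]
```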
diff --git a/examples/self-managed-node-group/eks-bottlerocket.tf b/examples/self-managed-node-group/eks-bottlerocket.tf
new file mode 100644
index 0000000000..be9b2450a9
--- /dev/null
+++ b/examples/self-managed-node-group/eks-bottlerocket.tf
@@ -0,0 +1,56 @@
+module "eks_bottlerocket" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 21.0"
+
+  name               = "${local.name}-bottlerocket"
+  kubernetes_version = "1.33"
+
+  # EKS Addons
+  addons = {
+    coredns = {}
+    eks-pod-identity-agent = {
+      before_compute = true
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      before_compute = true
+    }
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  self_managed_node_groups = {
+    example = {
+      ami_type      = "BOTTLEROCKET_x86_64"
+      instance_type = "m6i.large"
+
+      min_size = 2
+      max_size = 5
+      # This value is ignored after the initial creation
+      # https://github.com/bryantbiggs/eks-desired-size-hack
+      desired_size = 2
+
+      # This is not required - demonstrates how to pass additional configuration
+      # Ref https://bottlerocket.dev/en/os/1.19.x/api/settings/
+      bootstrap_extra_args = <<-EOT
+        # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, and is explicitly disabled here as well.
+        [settings.host-containers.admin]
+        enabled = false
+
+        # The control host container provides out-of-band access via SSM.
+        # It is enabled by default, and can be disabled if you do not expect to use SSM.
+        # This could leave you with no way to access the API and change settings on an existing node!
+        [settings.host-containers.control]
+        enabled = true
+
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+      EOT
+    }
+  }
+
+  tags = local.tags
+}
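Because the control host container is left enabled above, nodes in this group stay reachable out-of-band through SSM Session Manager, provided the node IAM role permits SSM (for example via the AmazonSSMManagedInstanceCore policy). A sketch with a placeholder instance ID:

```bash
$ aws ssm start-session --target i-0123456789abcdef0
```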
diff --git a/examples/self-managed-node-group/main.tf b/examples/self-managed-node-group/main.tf
new file mode 100644
index 0000000000..903ee577b9
--- /dev/null
+++ b/examples/self-managed-node-group/main.tf
@@ -0,0 +1,55 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name   = "ex-self-mng"
+  region = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Example    = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# VPC
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
diff --git a/examples/self-managed-node-group/outputs.tf b/examples/self-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/self-managed-node-group/variables.tf b/examples/self-managed-node-group/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/self-managed-node-group/versions.tf b/examples/self-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/examples/self-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/examples/spot_instances/main.tf b/examples/spot_instances/main.tf
deleted file mode 100644
index fb2ad23ee9..0000000000
--- a/examples/spot_instances/main.tf
+++ /dev/null
@@ -1,61 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-}
-
-data "aws_availability_zones" "available" {
-}
-
-locals {
-  cluster_name = "test-eks-spot-${random_string.suffix.result}"
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
-module "vpc" {
-  source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.47"
-
-  name                 = "test-vpc-spot"
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
-  enable_dns_hostnames = true
-}
-
-module "eks" {
-  source          = "../.."
-  cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.public_subnets
-  vpc_id          = module.vpc.vpc_id
-
-  worker_groups_launch_template = [
-    {
-      name                    = "spot-1"
-      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
-      spot_instance_pools     = 4
-      asg_max_size            = 5
-      asg_desired_capacity    = 5
-      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
-      public_ip               = true
-    },
-  ]
-}
diff --git a/examples/spot_instances/outputs.tf b/examples/spot_instances/outputs.tf
deleted file mode 100644
index a0788aff1d..0000000000
--- a/examples/spot_instances/outputs.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
-  value       = module.eks.cluster_endpoint
-}
-
-output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
-  value       = module.eks.cluster_security_group_id
-}
-
-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
-}
-
-output "region" {
-  description = "AWS region."
-  value       = var.region
-}
-
diff --git a/examples/spot_instances/variables.tf b/examples/spot_instances/variables.tf
deleted file mode 100644
index f69e50026b..0000000000
--- a/examples/spot_instances/variables.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
-
diff --git a/examples/spot_instances/versions.tf b/examples/spot_instances/versions.tf
deleted file mode 100644
index 6e29ae8f1b..0000000000
--- a/examples/spot_instances/versions.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws        = ">= 3.22.0"
-    local      = ">= 1.4"
-    random     = ">= 2.1"
-    kubernetes = "~> 1.11"
-  }
-}
diff --git a/fargate.tf b/fargate.tf
deleted file mode 100644
index 413c582a9c..0000000000
--- a/fargate.tf
+++ /dev/null
@@ -1,23 +0,0 @@
-module "fargate" {
-  source                            = "./modules/fargate"
-  cluster_name                      = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-  create_eks                        = var.create_eks
-  create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
-  fargate_pod_execution_role_name   = var.fargate_pod_execution_role_name
-  fargate_profiles                  = var.fargate_profiles
-  permissions_boundary              = var.permissions_boundary
-  iam_path                          = var.iam_path
-  iam_policy_arn_prefix             = local.policy_arn_prefix
-  subnets                           = var.subnets
-  tags                              = var.tags
-
-  # Hack to ensure ordering of resource creation.
-  # This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
-  # Do not create node_groups before other resources are ready; this removes race conditions.
-  # Ensure these resources are created before "unlocking" the data source.
-  # Will be removed in Terraform 0.13
-  eks_depends_on = [
-    aws_eks_cluster.this,
-    kubernetes_config_map.aws_auth,
-  ]
-}
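The workaround removed here predates module-level `depends_on`, which Terraform 0.13 introduced and which expresses the same ordering directly. A minimal sketch of that approach (not the module's actual code):

```hcl
module "fargate" {
  source       = "./modules/fargate"
  cluster_name = aws_eks_cluster.this[0].name

  # Explicit module-level ordering replaces the eks_depends_on variable hack
  depends_on = [
    aws_eks_cluster.this,
    kubernetes_config_map.aws_auth,
  ]
}
```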
diff --git a/irsa.tf b/irsa.tf
deleted file mode 100644
index 9c5d653a25..0000000000
--- a/irsa.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-# Enable IAM Roles for EKS Service-Accounts (IRSA).
-
-# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
-# being passed as a default value which is the same for all regions and
-# is valid until Jun 28 17:39:16 2034 GMT.
-# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
-# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
-# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
-
-resource "aws_iam_openid_connect_provider" "oidc_provider" {
-  count           = var.enable_irsa && var.create_eks ? 1 : 0
-  client_id_list  = [local.sts_principal]
-  thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
-  url             = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc.0.issuer, [""]))[0]
-
-  tags = merge(
-    {
-      Name = "${var.cluster_name}-eks-irsa"
-    },
-    var.tags
-  )
-}
diff --git a/kubectl.tf b/kubectl.tf
deleted file mode 100644
index 21021f92c6..0000000000
--- a/kubectl.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-resource "local_file" "kubeconfig" {
-  count                = var.write_kubeconfig && var.create_eks ? 1 : 0
-  content              = local.kubeconfig
-  filename             = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
-  file_permission      = var.kubeconfig_file_permission
-  directory_permission = "0755"
-}
diff --git a/local.tf b/local.tf
deleted file mode 100644
index dc836a4729..0000000000
--- a/local.tf
+++ /dev/null
@@ -1,273 +0,0 @@
-locals {
-
-  cluster_security_group_id         = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
-  cluster_primary_security_group_id = var.cluster_version >= 1.14 ? element(concat(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""]), 0) : null
-  cluster_iam_role_name             = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
-  cluster_iam_role_arn              = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
-  worker_security_group_id          = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
-
-  default_platform       = "linux"
-  default_iam_role_id    = concat(aws_iam_role.workers.*.id, [""])[0]
-  default_ami_id_linux   = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
-  default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
-
-  kubeconfig_name = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name
-
-  worker_group_count                 = length(var.worker_groups)
-  worker_group_launch_template_count = length(var.worker_groups_launch_template)
-
-  worker_has_linux_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup(
-    x,
-    "platform",
-    # Fallback on default `platform` if it's not defined in current worker group
-    lookup(
-      merge({ platform = local.default_platform }, var.workers_group_defaults),
-      "platform",
-      null
-    )
-  ) == "linux"]) > 0
-  worker_has_windows_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup(
-    x,
-    "platform",
-    # Fallback on default `platform` if it's not defined in current worker group
-    lookup(
-      merge({ platform = local.default_platform }, var.workers_group_defaults),
-      "platform",
-      null
-    )
-  ) == "windows"]) > 0
-
-  worker_ami_name_filter = var.worker_ami_name_filter != "" ? var.worker_ami_name_filter : "amazon-eks-node-${var.cluster_version}-v*"
-  # Windows nodes are available from k8s 1.14. If cluster version is less than 1.14, fix ami filter to some constant to not fail on 'terraform plan'.
-  worker_ami_name_filter_windows = (var.worker_ami_name_filter_windows != "" ?
-    var.worker_ami_name_filter_windows : "Windows_Server-2019-English-Core-EKS_Optimized-${tonumber(var.cluster_version) >= 1.14 ? var.cluster_version : 1.14}-*"
-  )
-
-  ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
-  sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
-
-  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
-  workers_group_defaults_defaults = {
-    name                              = "count.index"               # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
-    tags                              = []                          # A list of map defining extra tags to be applied to the worker group autoscaling group.
-    ami_id                            = ""                          # AMI ID for the eks linux based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
-    ami_id_windows                    = ""                          # AMI ID for the eks windows based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
-    asg_desired_capacity              = "1"                         # Desired worker capacity in the autoscaling group; changing its value will not affect the autoscaling group's desired capacity because the cluster-autoscaler manages up and down scaling of the nodes. Cluster-autoscaler adds nodes when pods are in pending state and removes the nodes when they are not required by modifying the desired_capacity of the autoscaling group. Note that an issue exists in which changing the value of asg_min_size also modifies the value of asg_desired_capacity.
-    asg_max_size                      = "3"                         # Maximum worker capacity in the autoscaling group.
-    asg_min_size                      = "1"                         # Minimum worker capacity in the autoscaling group. NOTE: a change to this parameter will affect asg_desired_capacity; e.g. changing its value to 2 will change asg_desired_capacity to 2, but bringing it back to 1 will not affect asg_desired_capacity.
-    asg_force_delete                  = false                       # Enable forced deletion for the autoscaling group.
-    asg_initial_lifecycle_hooks       = []                          # Initial lifecycle hooks for the autoscaling group.
-    default_cooldown                  = null                        # The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
-    health_check_type                 = null                        # Controls how health checking is done. Valid values are "EC2" or "ELB".
-    health_check_grace_period         = null                        # Time in seconds after instance comes into service before checking health.
-    instance_type                     = "m4.large"                  # Size of the workers instances.
-    instance_store_virtual_name       = "ephemeral0"                # "virtual_name" of the instance store volume.
-    spot_price                        = ""                          # Cost of spot instance.
-    placement_tenancy                 = ""                          # The tenancy of the instance. Valid values are "default" or "dedicated".
-    root_volume_size                  = "100"                       # root volume size of workers instances.
-    root_volume_type                  = "gp2"                       # root volume type of workers instances, can be "standard", "gp3", "gp2", or "io1"
-    root_iops                         = "0"                         # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
-    root_volume_throughput            = null                        # The amount of throughput to provision for a gp3 volume.
-    key_name                          = ""                          # The key pair name that should be used for the instances in the autoscaling group
-    pre_userdata                      = ""                          # userdata to pre-append to the default userdata.
-    userdata_template_file            = ""                          # alternate template to use for userdata
-    userdata_template_extra_args      = {}                          # Additional arguments to use when expanding the userdata template file
-    bootstrap_extra_args              = ""                          # Extra arguments passed to the bootstrap.sh script from the EKS AMI (Amazon Machine Image).
-    additional_userdata               = ""                          # userdata to append to the default userdata.
-    ebs_optimized                     = true                        # sets whether to use ebs optimization on supported types.
-    enable_monitoring                 = true                        # Enables/disables detailed monitoring.
-    enclave_support                   = false                       # Enables/disables enclave support
-    public_ip                         = false                       # Associate a public ip address with a worker
-    kubelet_extra_args                = ""                          # This string is passed directly to kubelet if set. Useful for adding labels or taints.
-    subnets                           = var.subnets                 # A list of subnets to place the worker nodes in. i.e. ["subnet-123", "subnet-456", "subnet-789"]
-    additional_security_group_ids     = []                          # A list of additional security group ids to include in worker launch config
-    protect_from_scale_in             = false                       # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
-    iam_instance_profile_name         = ""                          # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
-    iam_role_id                       = "local.default_iam_role_id" # A custom IAM role id. Incompatible with iam_instance_profile_name.  Literal local.default_iam_role_id will never be used but if iam_role_id is not set, the local.default_iam_role_id interpolation will be used.
-    suspended_processes               = ["AZRebalance"]             # A list of processes to suspend. i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
-    target_group_arns                 = null                        # A list of Application LoadBalancer (ALB) target group ARNs to be associated to the autoscaling group
-    load_balancers                    = null                        # A list of Classic LoadBalancer (CLB)'s name to be associated to the autoscaling group
-    enabled_metrics                   = []                          # A list of metrics to be collected i.e. ["GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity"]
-    placement_group                   = null                        # The name of the placement group into which to launch the instances, if any.
-    service_linked_role_arn           = ""                          # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
-    termination_policies              = []                          # A list of policies to decide how the instances in the auto scale group should be terminated.
-    platform                          = local.default_platform      # Platform of workers. Either "linux" or "windows".
-    additional_ebs_volumes            = []                          # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, encrypted, kms_key_id (only on launch-template), delete_on_termination. Optional values are grabbed from root volume or from defaults
-    additional_instance_store_volumes = []                          # A list of additional instance store (local disk) volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), virtual_name.
-    warm_pool                         = null                        # If this block is configured, add a Warm Pool to the specified Auto Scaling group.
-
-    # Settings for launch templates
-    root_block_device_name               = concat(data.aws_ami.eks_worker.*.root_device_name, [""])[0]         # Root device name for Linux workers. If not provided, will assume default Linux AMI was used.
-    root_block_device_name_windows       = concat(data.aws_ami.eks_worker_windows.*.root_device_name, [""])[0] # Root device name for Windows workers. If not provided, will assume default Windows AMI was used.
-    root_kms_key_id                      = ""                                                                  # The KMS key to use when encrypting the root storage device
-    launch_template_id                   = null                                                                # The id of the launch template used for managed node_groups
-    launch_template_version              = "$Latest"                                                           # The latest version of the launch template to use in the autoscaling group
-    update_default_version               = false                                                               # Update the autoscaling group launch template's default version upon each update
-    launch_template_placement_tenancy    = "default"                                                           # The placement tenancy for instances
-    launch_template_placement_group      = null                                                                # The name of the placement group into which to launch the instances, if any.
-    root_encrypted                       = false                                                               # Whether the volume should be encrypted or not
-    eni_delete                           = true                                                                # Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying)
-    cpu_credits                          = "standard"                                                          # T2/T3 unlimited mode, can be 'standard' or 'unlimited'. Used 'standard' mode as default to avoid paying higher costs
-    market_type                          = null
-    metadata_http_endpoint               = "enabled"  # The state of the metadata service: enabled, disabled.
-    metadata_http_tokens                 = "optional" # If session tokens are required: optional, required.
-    metadata_http_put_response_hop_limit = null       # The desired HTTP PUT response hop limit for instance metadata requests.
-    # Settings for launch templates with mixed instances policy
-    override_instance_types                  = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] # A list of override instance types for mixed instances policy
-    on_demand_allocation_strategy            = null                                                 # Strategy to use when launching on-demand instances. Valid values: prioritized.
-    on_demand_base_capacity                  = "0"                                                  # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
-    on_demand_percentage_above_base_capacity = "0"                                                  # Percentage split between on-demand and Spot instances above the base on-demand capacity
-    spot_allocation_strategy                 = "lowest-price"                                       # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
-    spot_instance_pools                      = 10                                                   # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
-    spot_max_price                           = ""                                                   # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
-    max_instance_lifetime                    = 0                                                    # Maximum number of seconds instances can run in the ASG. 0 is unlimited.
-    elastic_inference_accelerator            = null                                                 # Type of elastic inference accelerator to be attached. Example values are eia1.medium, eia2.large, etc.
-    instance_refresh_enabled                 = false                                                # Enable instance refresh for the worker autoscaling group.
-    instance_refresh_strategy                = "Rolling"                                            # Strategy to use for instance refresh. Default is 'Rolling' which the only valid value.
-    instance_refresh_min_healthy_percentage  = 90                                                   # The amount of capacity in the ASG that must remain healthy during an instance refresh, as a percentage of the ASG's desired capacity.
-    instance_refresh_instance_warmup         = null                                                 # The number of seconds until a newly launched instance is configured and ready to use. Defaults to the ASG's health check grace period.
-    instance_refresh_triggers                = []                                                   # Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy.
-    capacity_rebalance                       = false                                                # Enable capacity rebalance
-  }
-
-  workers_group_defaults = merge(
-    local.workers_group_defaults_defaults,
-    var.workers_group_defaults,
-  )
-
-  ebs_optimized_not_supported = [
-    "c1.medium",
-    "c3.8xlarge",
-    "c3.large",
-    "c5d.12xlarge",
-    "c5d.24xlarge",
-    "c5d.metal",
-    "cc2.8xlarge",
-    "cr1.8xlarge",
-    "g2.8xlarge",
-    "g4dn.metal",
-    "hs1.8xlarge",
-    "i2.8xlarge",
-    "m1.medium",
-    "m1.small",
-    "m2.xlarge",
-    "m3.large",
-    "m3.medium",
-    "m5ad.16xlarge",
-    "m5ad.8xlarge",
-    "m5dn.metal",
-    "m5n.metal",
-    "r3.8xlarge",
-    "r3.large",
-    "r5ad.16xlarge",
-    "r5ad.8xlarge",
-    "r5dn.metal",
-    "r5n.metal",
-    "t1.micro",
-    "t2.2xlarge",
-    "t2.large",
-    "t2.medium",
-    "t2.micro",
-    "t2.nano",
-    "t2.small",
-    "t2.xlarge"
-  ]
-
-  kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", {
-    kubeconfig_name                   = local.kubeconfig_name
-    endpoint                          = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
-    cluster_auth_base64               = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
-    aws_authenticator_command         = var.kubeconfig_aws_authenticator_command
-    aws_authenticator_command_args    = length(var.kubeconfig_aws_authenticator_command_args) > 0 ? var.kubeconfig_aws_authenticator_command_args : ["token", "-i", coalescelist(aws_eks_cluster.this[*].name, [""])[0]]
-    aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args
-    aws_authenticator_env_variables   = var.kubeconfig_aws_authenticator_env_variables
-  }) : ""
-
-  userdata_rendered = [
-    for index in range(var.create_eks ? local.worker_group_count : 0) : templatefile(
-      lookup(
-        var.worker_groups[index],
-        "userdata_template_file",
-        lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) == "windows"
-        ? "${path.module}/templates/userdata_windows.tpl"
-        : "${path.module}/templates/userdata.sh.tpl"
-      ),
-      merge({
-        platform            = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"])
-        cluster_name        = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-        endpoint            = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
-        cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
-        pre_userdata = lookup(
-          var.worker_groups[index],
-          "pre_userdata",
-          local.workers_group_defaults["pre_userdata"],
-        )
-        additional_userdata = lookup(
-          var.worker_groups[index],
-          "additional_userdata",
-          local.workers_group_defaults["additional_userdata"],
-        )
-        bootstrap_extra_args = lookup(
-          var.worker_groups[index],
-          "bootstrap_extra_args",
-          local.workers_group_defaults["bootstrap_extra_args"],
-        )
-        kubelet_extra_args = lookup(
-          var.worker_groups[index],
-          "kubelet_extra_args",
-          local.workers_group_defaults["kubelet_extra_args"],
-        )
-        },
-        lookup(
-          var.worker_groups[index],
-          "userdata_template_extra_args",
-          local.workers_group_defaults["userdata_template_extra_args"]
-        )
-      )
-    )
-  ]
-
-  launch_template_userdata_rendered = [
-    for index in range(var.create_eks ? local.worker_group_launch_template_count : 0) : templatefile(
-      lookup(
-        var.worker_groups_launch_template[index],
-        "userdata_template_file",
-        lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"]) == "windows"
-        ? "${path.module}/templates/userdata_windows.tpl"
-        : "${path.module}/templates/userdata.sh.tpl"
-      ),
-      merge({
-        platform            = lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"])
-        cluster_name        = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-        endpoint            = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
-        cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
-        pre_userdata = lookup(
-          var.worker_groups_launch_template[index],
-          "pre_userdata",
-          local.workers_group_defaults["pre_userdata"],
-        )
-        additional_userdata = lookup(
-          var.worker_groups_launch_template[index],
-          "additional_userdata",
-          local.workers_group_defaults["additional_userdata"],
-        )
-        bootstrap_extra_args = lookup(
-          var.worker_groups_launch_template[index],
-          "bootstrap_extra_args",
-          local.workers_group_defaults["bootstrap_extra_args"],
-        )
-        kubelet_extra_args = lookup(
-          var.worker_groups_launch_template[index],
-          "kubelet_extra_args",
-          local.workers_group_defaults["kubelet_extra_args"],
-        )
-        },
-        lookup(
-          var.worker_groups_launch_template[index],
-          "userdata_template_extra_args",
-          local.workers_group_defaults["userdata_template_extra_args"]
-        )
-      )
-    )
-  ]
-}
diff --git a/main.tf b/main.tf
new file mode 100644
index 0000000000..48207dc238
--- /dev/null
+++ b/main.tf
@@ -0,0 +1,929 @@
+data "aws_partition" "current" {
+  count = local.create ? 1 : 0
+}
+data "aws_caller_identity" "current" {
+  count = local.create ? 1 : 0
+}
+
+data "aws_iam_session_context" "current" {
+  count = local.create ? 1 : 0
+
+  # This data source provides information on the IAM source role of an STS assumed role
+  # For non-role ARNs, this data source simply passes the ARN through as the issuer ARN
+  # Ref https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2327#issuecomment-1355581682
+  # Ref https://github.com/hashicorp/terraform-provider-aws/issues/28381
+  arn = try(data.aws_caller_identity.current[0].arn, "")
+}
+
+locals {
+  create = var.create && var.putin_khuylo
+
+  account_id = try(data.aws_caller_identity.current[0].account_id, "")
+  partition  = try(data.aws_partition.current[0].partition, "")
+
+  role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
+
+  create_outposts_local_cluster = var.outpost_config != null
+  enable_encryption_config      = var.encryption_config != null && !local.create_outposts_local_cluster
+
+  auto_mode_enabled = try(var.compute_config.enabled, false)
+}
+
+################################################################################
+# Cluster
+################################################################################
+
+resource "aws_eks_cluster" "this" {
+  count = local.create ? 1 : 0
+
+  region = var.region
+
+  name                          = var.name
+  role_arn                      = local.role_arn
+  version                       = var.kubernetes_version
+  enabled_cluster_log_types     = var.enabled_log_types
+  bootstrap_self_managed_addons = false
+  force_update_version          = var.force_update_version
+
+  access_config {
+    authentication_mode = var.authentication_mode
+
+    # See access entries below - this is a one-time operation from the EKS API.
+    # Instead, we hardcode this to false; users who want the same functionality
+    # can achieve it through an access entry, which can be enabled or disabled
+    # at any time of their choosing using the variable
+    # var.enable_cluster_creator_admin_permissions
+    bootstrap_cluster_creator_admin_permissions = false
+  }
+
+  dynamic "compute_config" {
+    for_each = var.compute_config != null ? [var.compute_config] : []
+
+    content {
+      enabled       = compute_config.value.enabled
+      node_pools    = compute_config.value.node_pools
+      node_role_arn = compute_config.value.node_pools != null ? try(aws_iam_role.eks_auto[0].arn, compute_config.value.node_role_arn) : null
+    }
+  }
+
+  vpc_config {
+    security_group_ids      = compact(distinct(concat(var.additional_security_group_ids, [local.security_group_id])))
+    subnet_ids              = coalescelist(var.control_plane_subnet_ids, var.subnet_ids)
+    endpoint_private_access = var.endpoint_private_access
+    endpoint_public_access  = var.endpoint_public_access
+    public_access_cidrs     = var.endpoint_public_access_cidrs
+  }
+
+  dynamic "kubernetes_network_config" {
+    # Not valid on Outposts
+    for_each = local.create_outposts_local_cluster ? [] : [1]
+
+    content {
+      dynamic "elastic_load_balancing" {
+        for_each = local.auto_mode_enabled ? [1] : []
+
+        content {
+          enabled = local.auto_mode_enabled
+        }
+      }
+
+      ip_family         = var.ip_family
+      service_ipv4_cidr = var.service_ipv4_cidr
+      service_ipv6_cidr = var.service_ipv6_cidr
+    }
+  }
+
+  dynamic "outpost_config" {
+    for_each = local.create_outposts_local_cluster ? [var.outpost_config] : []
+
+    content {
+      control_plane_instance_type = outpost_config.value.control_plane_instance_type
+
+      dynamic "control_plane_placement" {
+        for_each = outpost_config.value.control_plane_placement != null ? [outpost_config.value.control_plane_placement] : []
+
+        content {
+          group_name = control_plane_placement.value.group_name
+        }
+      }
+
+      outpost_arns = outpost_config.value.outpost_arns
+    }
+  }
+
+  dynamic "encryption_config" {
+    # Not available on Outposts
+    for_each = local.enable_encryption_config ? [var.encryption_config] : []
+
+    content {
+      provider {
+        key_arn = var.create_kms_key ? module.kms.key_arn : encryption_config.value.provider_key_arn
+      }
+      resources = encryption_config.value.resources
+    }
+  }
+
+  dynamic "remote_network_config" {
+    # Not valid on Outposts
+    for_each = var.remote_network_config != null && !local.create_outposts_local_cluster ? [var.remote_network_config] : []
+
+    content {
+      dynamic "remote_node_networks" {
+        for_each = [remote_network_config.value.remote_node_networks]
+
+        content {
+          cidrs = remote_node_networks.value.cidrs
+        }
+      }
+
+      dynamic "remote_pod_networks" {
+        for_each = remote_network_config.value.remote_pod_networks != null ? [remote_network_config.value.remote_pod_networks] : []
+
+        content {
+          cidrs = remote_pod_networks.value.cidrs
+        }
+      }
+    }
+  }
+
+  dynamic "storage_config" {
+    for_each = local.auto_mode_enabled ? [1] : []
+
+    content {
+      block_storage {
+        enabled = local.auto_mode_enabled
+      }
+    }
+  }
+
+  dynamic "upgrade_policy" {
+    for_each = var.upgrade_policy != null ? [var.upgrade_policy] : []
+
+    content {
+      support_type = upgrade_policy.value.support_type
+    }
+  }
+
+  dynamic "zonal_shift_config" {
+    for_each = var.zonal_shift_config != null ? [var.zonal_shift_config] : []
+
+    content {
+      enabled = zonal_shift_config.value.enabled
+    }
+  }
+
+  tags = merge(
+    { terraform-aws-modules = "eks" },
+    var.tags,
+    var.cluster_tags,
+  )
+
+  dynamic "timeouts" {
+    for_each = var.timeouts != null ? [var.timeouts] : []
+
+    content {
+      create = var.timeouts.create
+      update = var.timeouts.update
+      delete = var.timeouts.delete
+    }
+  }
+
+  depends_on = [
+    aws_iam_role_policy_attachment.this,
+    aws_security_group_rule.cluster,
+    aws_security_group_rule.node,
+    aws_cloudwatch_log_group.this,
+    aws_iam_policy.cni_ipv6_policy,
+  ]
+
+  lifecycle {
+    ignore_changes = [
+      access_config[0].bootstrap_cluster_creator_admin_permissions,
+      bootstrap_self_managed_addons,
+    ]
+  }
+}
+
+resource "aws_ec2_tag" "cluster_primary_security_group" {
+  # This should not affect the name of the cluster primary security group
+  # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006
+  # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008
+  for_each = { for k, v in merge(var.tags, var.cluster_tags) :
+    k => v if local.create && k != "Name" && var.create_primary_security_group_tags
+  }
+
+  region = var.region
+
+  resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
+  key         = each.key
+  value       = each.value
+}
+
+resource "aws_cloudwatch_log_group" "this" {
+  count = local.create && var.create_cloudwatch_log_group ? 1 : 0
+
+  region = var.region
+
+  name              = "/aws/eks/${var.name}/cluster"
+  retention_in_days = var.cloudwatch_log_group_retention_in_days
+  kms_key_id        = var.cloudwatch_log_group_kms_key_id
+  log_group_class   = var.cloudwatch_log_group_class
+
+  tags = merge(
+    var.tags,
+    var.cloudwatch_log_group_tags,
+    { Name = "/aws/eks/${var.name}/cluster" }
+  )
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+locals {
+  # This replaces the one-time logic from the EKS API with something that can be
+  # better controlled by users through Terraform
+  bootstrap_cluster_creator_admin_permissions = { for k, v in {
+    cluster_creator = {
+      principal_arn = try(data.aws_iam_session_context.current[0].issuer_arn, "")
+      type          = "STANDARD"
+
+      policy_associations = {
+        admin = {
+          policy_arn = "arn:${local.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+  } : k => v if var.enable_cluster_creator_admin_permissions }
+
+  # Merge the bootstrap behavior with the entries that users provide
+  merged_access_entries = merge(
+    local.bootstrap_cluster_creator_admin_permissions,
+    var.access_entries,
+  )
+
+  # Flatten out entries and policy associations so users can specify the policy
+  # associations within a single entry
+  flattened_access_entries = flatten([
+    for entry_key, entry_val in local.merged_access_entries : [
+      for pol_key, pol_val in try(entry_val.policy_associations, {}) :
+      merge(
+        {
+          principal_arn = entry_val.principal_arn
+          entry_key     = entry_key
+          pol_key       = pol_key
+        },
+        { for k, v in {
+          association_policy_arn              = pol_val.policy_arn
+          association_access_scope_type       = pol_val.access_scope.type
+          association_access_scope_namespaces = try(pol_val.access_scope.namespaces, null)
+        } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX", "HYBRID_LINUX"], lookup(entry_val, "type", "STANDARD")) },
+      )
+    ]
+  ])
+}
+
+resource "aws_eks_access_entry" "this" {
+  for_each = { for k, v in local.merged_access_entries : k => v if local.create }
+
+  region = var.region
+
+  cluster_name      = aws_eks_cluster.this[0].id
+  kubernetes_groups = try(each.value.kubernetes_groups, null)
+  principal_arn     = each.value.principal_arn
+  type              = try(each.value.type, null)
+  user_name         = try(each.value.user_name, null)
+
+  tags = merge(
+    var.tags,
+    try(each.value.tags, {}),
+  )
+}
+
+resource "aws_eks_access_policy_association" "this" {
+  for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create }
+
+  region = var.region
+
+  access_scope {
+    namespaces = each.value.association_access_scope_namespaces
+    type       = each.value.association_access_scope_type
+  }
+
+  cluster_name = aws_eks_cluster.this[0].id
+
+  policy_arn    = each.value.association_policy_arn
+  principal_arn = each.value.principal_arn
+
+  depends_on = [
+    aws_eks_access_entry.this,
+  ]
+}
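For reference, the entries and policy associations flattened above correspond to module inputs shaped like the following (a caller-side sketch; the ARN, namespace, and map keys are placeholders):

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 21.0"

  name               = "example"
  kubernetes_version = "1.33"
  vpc_id             = "vpc-abcde012"
  subnet_ids         = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]

  # Replaces the EKS API's one-time bootstrap admin permission with an access entry
  enable_cluster_creator_admin_permissions = true

  access_entries = {
    readonly = {
      principal_arn = "arn:aws:iam::111122223333:role/read-only"

      policy_associations = {
        view = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
          access_scope = {
            type       = "namespace"
            namespaces = ["default"]
          }
        }
      }
    }
  }
}
```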
+
+################################################################################
+# KMS Key
+################################################################################
+
+module "kms" {
+  source  = "terraform-aws-modules/kms/aws"
+  version = "4.0.0" # Note - be mindful of Terraform/provider version compatibility between modules
+
+  create = local.create && var.create_kms_key && local.enable_encryption_config # not valid on Outposts
+
+  region = var.region
+
+  description             = coalesce(var.kms_key_description, "${var.name} cluster encryption key")
+  key_usage               = "ENCRYPT_DECRYPT"
+  deletion_window_in_days = var.kms_key_deletion_window_in_days
+  enable_key_rotation     = var.enable_kms_key_rotation
+
+  # Policy
+  enable_default_policy     = var.kms_key_enable_default_policy
+  key_owners                = var.kms_key_owners
+  key_administrators        = coalescelist(var.kms_key_administrators, [try(data.aws_iam_session_context.current[0].issuer_arn, "")])
+  key_users                 = concat([local.role_arn], var.kms_key_users)
+  key_service_users         = var.kms_key_service_users
+  source_policy_documents   = var.kms_key_source_policy_documents
+  override_policy_documents = var.kms_key_override_policy_documents
+
+  # Aliases
+  aliases = var.kms_key_aliases
+  computed_aliases = {
+    # Computed since users can pass in computed values for cluster name such as random provider resources
+    cluster = { name = "eks/${var.name}" }
+  }
+
+  tags = merge(
+    { terraform-aws-modules = "eks" },
+    var.tags,
+  )
+}
+
+################################################################################
+# Cluster Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+################################################################################
+
+locals {
+  security_group_name   = coalesce(var.security_group_name, "${var.name}-cluster")
+  create_security_group = local.create && var.create_security_group
+
+  security_group_id = local.create_security_group ? aws_security_group.cluster[0].id : var.security_group_id
+
+  # Do not add rules to node security group if the module is not creating it
+  cluster_security_group_rules = { for k, v in {
+    ingress_nodes_443 = {
+      description                = "Node groups to cluster API"
+      protocol                   = "tcp"
+      from_port                  = 443
+      to_port                    = 443
+      type                       = "ingress"
+      source_node_security_group = true
+    }
+  } : k => v if local.create_node_sg }
+}
+
+resource "aws_security_group" "cluster" {
+  count = local.create_security_group ? 1 : 0
+
+  region = var.region
+
+  name        = var.security_group_use_name_prefix ? null : local.security_group_name
+  name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}${var.prefix_separator}" : null
+  description = var.security_group_description
+  vpc_id      = var.vpc_id
+
+  tags = merge(
+    var.tags,
+    { "Name" = local.security_group_name },
+    var.security_group_tags
+  )
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_security_group_rule" "cluster" {
+  for_each = { for k, v in merge(
+    local.cluster_security_group_rules,
+    var.security_group_additional_rules
+  ) : k => v if local.create_security_group }
+
+  region = var.region
+
+  security_group_id        = aws_security_group.cluster[0].id
+  protocol                 = each.value.protocol
+  from_port                = each.value.from_port
+  to_port                  = each.value.to_port
+  type                     = each.value.type
+  description              = try(each.value.description, null)
+  cidr_blocks              = try(each.value.cidr_blocks, null)
+  ipv6_cidr_blocks         = try(each.value.ipv6_cidr_blocks, null)
+  prefix_list_ids          = try(each.value.prefix_list_ids, null)
+  self                     = try(each.value.self, null)
+  source_security_group_id = try(each.value.source_node_security_group, false) ? local.node_security_group_id : try(each.value.source_security_group_id, null)
+}
+
+################################################################################
+# IRSA
+# Note - this is different from EKS identity provider
+################################################################################
+
+locals {
+  # Not available on outposts
+  create_oidc_provider = local.create && var.enable_irsa && !local.create_outposts_local_cluster
+
+  oidc_root_ca_thumbprint = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? [data.tls_certificate.this[0].certificates[0].sha1_fingerprint] : []
+}
+
+data "tls_certificate" "this" {
+  # Not available on outposts
+  count = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? 1 : 0
+
+  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
+}
+
+resource "aws_iam_openid_connect_provider" "oidc_provider" {
+  # Not available on outposts
+  count = local.create_oidc_provider ? 1 : 0
+
+  client_id_list  = distinct(compact(concat(["sts.amazonaws.com"], var.openid_connect_audiences)))
+  thumbprint_list = concat(local.oidc_root_ca_thumbprint, var.custom_oidc_thumbprints)
+  url             = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
+
+  tags = merge(
+    { Name = "${var.name}-eks-irsa" },
+    var.tags
+  )
+}
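The OIDC provider above is what IRSA consumers trust; the per-service-account IAM roles themselves are created outside this module. A minimal caller-side sketch (the submodule path, role name, and service account are assumptions, and `module.eks.oidc_provider_arn` is assumed to be this module's output for the provider):

```hcl
module "irsa_example" {
  source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"

  role_name = "irsa-example"

  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["default:example-sa"]
    }
  }
}
```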
+
+################################################################################
+# IAM Role
+################################################################################
+
+locals {
+  create_iam_role        = local.create && var.create_iam_role
+  iam_role_name          = coalesce(var.iam_role_name, "${var.name}-cluster")
+  iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
+
+  cluster_encryption_policy_name = coalesce(var.encryption_policy_name, "${local.iam_role_name}-ClusterEncryption")
+
+  # Standard EKS cluster
+  eks_standard_iam_role_policies = { for k, v in {
+    AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy",
+  } : k => v if !local.create_outposts_local_cluster && !local.auto_mode_enabled }
+
+  # EKS cluster with EKS auto mode enabled
+  eks_auto_mode_iam_role_policies = { for k, v in {
+    AmazonEKSClusterPolicy       = "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy"
+    AmazonEKSComputePolicy       = "${local.iam_role_policy_prefix}/AmazonEKSComputePolicy"
+    AmazonEKSBlockStoragePolicy  = "${local.iam_role_policy_prefix}/AmazonEKSBlockStoragePolicy"
+    AmazonEKSLoadBalancingPolicy = "${local.iam_role_policy_prefix}/AmazonEKSLoadBalancingPolicy"
+    AmazonEKSNetworkingPolicy    = "${local.iam_role_policy_prefix}/AmazonEKSNetworkingPolicy"
+  } : k => v if !local.create_outposts_local_cluster && local.auto_mode_enabled }
+
+  # EKS local cluster on Outposts
+  eks_outpost_iam_role_policies = { for k, v in {
+    AmazonEKSClusterPolicy = "${local.iam_role_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy"
+  } : k => v if local.create_outposts_local_cluster && !local.auto_mode_enabled }
+}
+
+data "aws_iam_policy_document" "assume_role_policy" {
+  count = local.create && var.create_iam_role ? 1 : 0
+
+  statement {
+    sid = "EKSClusterAssumeRole"
+    actions = [
+      "sts:AssumeRole",
+      "sts:TagSession",
+    ]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+
+    dynamic "principals" {
+      for_each = local.create_outposts_local_cluster ? [1] : []
+
+      content {
+        type        = "Service"
+        identifiers = ["ec2.amazonaws.com"]
+      }
+    }
+  }
+}
+
+resource "aws_iam_role" "this" {
+  count = local.create_iam_role ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}${var.prefix_separator}" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.assume_role_policy[0].json
+  permissions_boundary  = var.iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+# Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html
+resource "aws_iam_role_policy_attachment" "this" {
+  for_each = { for k, v in merge(
+    local.eks_standard_iam_role_policies,
+    local.eks_auto_mode_iam_role_policies,
+    local.eks_outpost_iam_role_policies,
+  ) : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "additional" {
+  for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+# Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply`
+resource "aws_iam_role_policy_attachment" "cluster_encryption" {
+  # Encryption config not available on Outposts
+  count = local.create_iam_role && var.attach_encryption_policy && local.enable_encryption_config ? 1 : 0
+
+  policy_arn = aws_iam_policy.cluster_encryption[0].arn
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_policy" "cluster_encryption" {
+  # Encryption config not available on Outposts
+  count = local.create_iam_role && var.attach_encryption_policy && local.enable_encryption_config ? 1 : 0
+
+  name        = var.encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name
+  name_prefix = var.encryption_policy_use_name_prefix ? local.cluster_encryption_policy_name : null
+  description = var.encryption_policy_description
+  path        = var.encryption_policy_path
+
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "kms:Encrypt",
+          "kms:Decrypt",
+          "kms:ListGrants",
+          "kms:DescribeKey",
+        ]
+        Effect   = "Allow"
+        Resource = var.create_kms_key ? module.kms.key_arn : var.encryption_config.provider_key_arn
+      },
+    ]
+  })
+
+  tags = merge(var.tags, var.encryption_policy_tags)
+}
+
+data "aws_iam_policy_document" "custom" {
+  count = local.create_iam_role && local.auto_mode_enabled && var.enable_auto_mode_custom_tags ? 1 : 0
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid = "Compute"
+      actions = [
+        "ec2:CreateFleet",
+        "ec2:RunInstances",
+        "ec2:CreateLaunchTemplate",
+      ]
+      resources = ["*"]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+
+      condition {
+        test     = "StringLike"
+        variable = "aws:RequestTag/eks:kubernetes-node-class-name"
+        values   = ["*"]
+      }
+
+      condition {
+        test     = "StringLike"
+        variable = "aws:RequestTag/eks:kubernetes-node-pool-name"
+        values   = ["*"]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid = "Storage"
+      actions = [
+        "ec2:CreateVolume",
+        "ec2:CreateSnapshot",
+      ]
+      resources = [
+        "arn:${local.partition}:ec2:*:*:volume/*",
+        "arn:${local.partition}:ec2:*:*:snapshot/*",
+      ]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid       = "Networking"
+      actions   = ["ec2:CreateNetworkInterface"]
+      resources = ["*"]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:kubernetes-cni-node-name"
+        values   = ["*"]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid = "LoadBalancer"
+      actions = [
+        "elasticloadbalancing:CreateLoadBalancer",
+        "elasticloadbalancing:CreateTargetGroup",
+        "elasticloadbalancing:CreateListener",
+        "elasticloadbalancing:CreateRule",
+        "ec2:CreateSecurityGroup",
+      ]
+      resources = ["*"]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid       = "ShieldProtection"
+      actions   = ["shield:CreateProtection"]
+      resources = ["*"]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_auto_mode_custom_tags ? [1] : []
+
+    content {
+      sid       = "ShieldTagResource"
+      actions   = ["shield:TagResource"]
+      resources = ["arn:${local.partition}:shield::*:protection/*"]
+
+      condition {
+        test     = "StringEquals"
+        variable = "aws:RequestTag/eks:eks-cluster-name"
+        values   = ["$${aws:PrincipalTag/eks:eks-cluster-name}"]
+      }
+    }
+  }
+}
+
+resource "aws_iam_policy" "custom" {
+  count = local.create_iam_role && local.auto_mode_enabled && var.enable_auto_mode_custom_tags ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  policy = data.aws_iam_policy_document.custom[0].json
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+resource "aws_iam_role_policy_attachment" "custom" {
+  count = local.create_iam_role && local.auto_mode_enabled && var.enable_auto_mode_custom_tags ? 1 : 0
+
+  policy_arn = aws_iam_policy.custom[0].arn
+  role       = aws_iam_role.this[0].name
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+data "aws_eks_addon_version" "this" {
+  for_each = var.addons != null && local.create && !local.create_outposts_local_cluster ? var.addons : {}
+
+  region = var.region
+
+  addon_name         = coalesce(each.value.name, each.key)
+  kubernetes_version = coalesce(var.kubernetes_version, aws_eks_cluster.this[0].version)
+  most_recent        = each.value.most_recent
+}
+
+resource "aws_eks_addon" "this" {
+  # Not supported on outposts
+  for_each = var.addons != null && local.create && !local.create_outposts_local_cluster ? { for k, v in var.addons : k => v if !v.before_compute } : {}
+
+  region = var.region
+
+  cluster_name = aws_eks_cluster.this[0].id
+  addon_name   = coalesce(each.value.name, each.key)
+
+  addon_version        = coalesce(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version)
+  configuration_values = each.value.configuration_values
+
+  dynamic "pod_identity_association" {
+    for_each = each.value.pod_identity_association != null ? each.value.pod_identity_association : []
+
+    content {
+      role_arn        = pod_identity_association.value.role_arn
+      service_account = pod_identity_association.value.service_account
+    }
+  }
+
+  preserve                    = each.value.preserve
+  resolve_conflicts_on_create = each.value.resolve_conflicts_on_create
+  resolve_conflicts_on_update = each.value.resolve_conflicts_on_update
+  service_account_role_arn    = each.value.service_account_role_arn
+
+  timeouts {
+    create = try(coalesce(each.value.timeouts.create, var.addons_timeouts.create), null)
+    update = try(coalesce(each.value.timeouts.update, var.addons_timeouts.update), null)
+    delete = try(coalesce(each.value.timeouts.delete, var.addons_timeouts.delete), null)
+  }
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
+
+  # before_compute = false
+  depends_on = [
+    module.fargate_profile,
+    module.eks_managed_node_group,
+    module.self_managed_node_group,
+  ]
+}
+
+resource "aws_eks_addon" "before_compute" {
+  # Not supported on outposts
+  for_each = var.addons != null && local.create && !local.create_outposts_local_cluster ? { for k, v in var.addons : k => v if v.before_compute } : {}
+
+  region = var.region
+
+  cluster_name = aws_eks_cluster.this[0].id
+  addon_name   = coalesce(each.value.name, each.key)
+
+  addon_version        = coalesce(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version)
+  configuration_values = each.value.configuration_values
+
+  dynamic "pod_identity_association" {
+    for_each = each.value.pod_identity_association != null ? each.value.pod_identity_association : []
+
+    content {
+      role_arn        = pod_identity_association.value.role_arn
+      service_account = pod_identity_association.value.service_account
+    }
+  }
+
+  preserve                    = each.value.preserve
+  resolve_conflicts_on_create = each.value.resolve_conflicts_on_create
+  resolve_conflicts_on_update = each.value.resolve_conflicts_on_update
+  service_account_role_arn    = each.value.service_account_role_arn
+
+  timeouts {
+    create = try(coalesce(each.value.timeouts.create, var.addons_timeouts.create), null)
+    update = try(coalesce(each.value.timeouts.update, var.addons_timeouts.update), null)
+    delete = try(coalesce(each.value.timeouts.delete, var.addons_timeouts.delete), null)
+  }
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
+}
+
+################################################################################
+# EKS Identity Provider
+# Note - this is different from IRSA
+################################################################################
+
+resource "aws_eks_identity_provider_config" "this" {
+  for_each = var.identity_providers != null && local.create && !local.create_outposts_local_cluster ? var.identity_providers : {}
+
+  region = var.region
+
+  cluster_name = aws_eks_cluster.this[0].id
+
+  oidc {
+    client_id                     = each.value.client_id
+    groups_claim                  = each.value.groups_claim
+    groups_prefix                 = each.value.groups_prefix
+    identity_provider_config_name = coalesce(each.value.identity_provider_config_name, each.key)
+    issuer_url                    = each.value.issuer_url
+    required_claims               = each.value.required_claims
+    username_claim                = each.value.username_claim
+    username_prefix               = each.value.username_prefix
+  }
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
+}
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+locals {
+  create_node_iam_role = local.create && var.create_node_iam_role && local.auto_mode_enabled
+  node_iam_role_name   = coalesce(var.node_iam_role_name, "${var.name}-eks-auto")
+}
+
+data "aws_iam_policy_document" "node_assume_role_policy" {
+  count = local.create_node_iam_role ? 1 : 0
+
+  statement {
+    sid = "EKSAutoNodeAssumeRole"
+    actions = [
+      "sts:AssumeRole",
+      "sts:TagSession",
+    ]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "eks_auto" {
+  count = local.create_node_iam_role ? 1 : 0
+
+  name        = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name
+  name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null
+  path        = var.node_iam_role_path
+  description = var.node_iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.node_assume_role_policy[0].json
+  permissions_boundary  = var.node_iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.node_iam_role_tags)
+}
+
+# Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html
+resource "aws_iam_role_policy_attachment" "eks_auto" {
+  for_each = { for k, v in {
+    AmazonEKSWorkerNodeMinimalPolicy   = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodeMinimalPolicy",
+    AmazonEC2ContainerRegistryPullOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryPullOnly",
+  } : k => v if local.create_node_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.eks_auto[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "eks_auto_additional" {
+  for_each = { for k, v in var.node_iam_role_additional_policies : k => v if local.create_node_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.eks_auto[0].name
+}
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000..cc20c604b2
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,65 @@
+site_name: Terraform AWS EKS
+docs_dir: docs/
+site_url: https://terraform-aws-modules/terraform-aws-eks/
+repo_name: terraform-aws-eks
+repo_url: https://github.com/terraform-aws-modules/terraform-aws-eks
+
+theme:
+  name: material
+  logo: assets/terraform-aws.png
+  favicon: assets/logo.png
+  font:
+    text: ember
+  palette:
+    primary: deep purple
+    accent: indigo
+  icon:
+    repo: fontawesome/brands/github
+    admonition:
+      note: octicons/tag-16
+      abstract: octicons/checklist-16
+      info: octicons/info-16
+      tip: octicons/squirrel-16
+      success: octicons/check-16
+      question: octicons/question-16
+      warning: octicons/alert-16
+      failure: octicons/x-circle-16
+      danger: octicons/zap-16
+      bug: octicons/bug-16
+      example: octicons/beaker-16
+      quote: octicons/quote-16
+  features:
+    - navigation.tabs.sticky
+  highlightjs: true
+  hljs_languages:
+    - yaml
+    - json
+
+plugins:
+  - include-markdown
+  - search:
+      lang:
+        - en
+  - awesome-pages
+
+extra:
+  version:
+    provider: mike
+
+markdown_extensions:
+  - attr_list
+  - admonition
+  - codehilite
+  - footnotes
+  - md_in_html
+  - pymdownx.critic
+  - pymdownx.details
+  - pymdownx.highlight:
+      anchor_linenums: true
+      line_spans: __span
+      pygments_lang_class: true
+  - pymdownx.inlinehilite
+  - pymdownx.snippets
+  - pymdownx.superfences
+  - toc:
+      permalink: true
diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md
new file mode 100644
index 0000000000..610051bc9c
--- /dev/null
+++ b/modules/_user_data/README.md
@@ -0,0 +1,61 @@
+# User Data Module
+
+Configuration in this directory renders the appropriate user data for the given inputs. See [`docs/user_data.md`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md) for more info.
+
+See [`tests/user-data/`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/tests/user-data) for various test cases using this module.
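+
+## Usage
+
+A minimal sketch of calling this internal module directly is shown below. The module label, source path, and all values are illustrative only; in normal use the node group modules wire these inputs for you from the cluster outputs.
+
+```hcl
+module "user_data" {
+  source = "./modules/_user_data" # illustrative path; typically consumed via the node group modules
+
+  create                     = true
+  ami_type                   = "AL2023_x86_64_STANDARD"
+  is_eks_managed_node_group  = true
+  enable_bootstrap_user_data = true
+
+  # Required to bootstrap the node (values are placeholders)
+  cluster_name         = "my-cluster"
+  cluster_endpoint     = "https://EXAMPLE.gr7.us-east-1.eks.amazonaws.com"
+  cluster_auth_base64  = "LS0tLS1CRUdJTi4uLg=="
+  cluster_service_cidr = "172.20.0.0/16"
+
+  # Optional cloud-init part rendered before the nodeadm document part
+  cloudinit_pre_nodeadm = [{
+    content_type = "application/node.eks.aws"
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+    EOT
+  }]
+}
+```
+
+The rendered result is available from the `user_data` output as a base64 encoded string.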
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_cloudinit"></a> [cloudinit](#requirement\_cloudinit) | >= 2.0 |
+| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_cloudinit"></a> [cloudinit](#provider\_cloudinit) | >= 2.0 |
+| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [null_resource.validate_cluster_service_cidr](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [cloudinit_config.al2023_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+| [cloudinit_config.al2_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_additional_cluster_dns_ips"></a> [additional\_cluster\_dns\_ips](#input\_additional\_cluster\_dns\_ips) | Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*` | `list(string)` | `[]` | no |
+| <a name="input_ami_type"></a> [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `"AL2023_x86_64_STANDARD"` | no |
+| <a name="input_bootstrap_extra_args"></a> [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
+| <a name="input_cloudinit_post_nodeadm"></a> [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `[]` | no |
+| <a name="input_cloudinit_pre_nodeadm"></a> [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `[]` | no |
+| <a name="input_cluster_auth_base64"></a> [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
+| <a name="input_cluster_endpoint"></a> [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
+| <a name="input_cluster_ip_family"></a> [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no |
+| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| <a name="input_cluster_service_cidr"></a> [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself | `string` | `""` | no |
+| <a name="input_create"></a> [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no |
+| <a name="input_enable_bootstrap_user_data"></a> [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
+| <a name="input_is_eks_managed_node_group"></a> [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
+| <a name="input_post_bootstrap_user_data"></a> [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no |
+| <a name="input_pre_bootstrap_user_data"></a> [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no |
+| <a name="input_user_data_template_path"></a> [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_user_data"></a> [user\_data](#output\_user\_data) | Base64 encoded user data rendered for the provided inputs |
+<!-- END_TF_DOCS -->
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
new file mode 100644
index 0000000000..c394421c4c
--- /dev/null
+++ b/modules/_user_data/main.tf
@@ -0,0 +1,136 @@
+# The `cluster_service_cidr` is required when `create == true`
+# This is a hacky way to make that logic work, otherwise Terraform always wants a value
+# and supplying any old value like `""` or `null` is not valid and nodes will
+# silently fail to join the cluster
+resource "null_resource" "validate_cluster_service_cidr" {
+  lifecycle {
+    precondition {
+      # The length 6 is currently arbitrary, but it's a safe bet that the CIDR will be longer than that
+      # The main point is that a value needs to be provided when `create = true`
+      condition     = var.create ? length(var.cluster_service_cidr) > 6 : true
+      error_message = "`cluster_service_cidr` is required when `create = true`."
+    }
+  }
+}
+
+locals {
+  is_al2    = startswith(var.ami_type, "AL2_")
+  is_al2023 = startswith(var.ami_type, "AL2023_")
+
+  # Converts AMI type into user data template path
+  ami_type_to_user_data_path = {
+    AL2_ARM_64     = "${path.module}/../../templates/al2_user_data.tpl"
+    AL2_x86_64     = "${path.module}/../../templates/al2_user_data.tpl"
+    AL2_x86_64_GPU = "${path.module}/../../templates/al2_user_data.tpl"
+
+    AL2023_x86_64_STANDARD = "${path.module}/../../templates/al2023_user_data.tpl"
+    AL2023_ARM_64_STANDARD = "${path.module}/../../templates/al2023_user_data.tpl"
+    AL2023_x86_64_NEURON   = "${path.module}/../../templates/al2023_user_data.tpl"
+    AL2023_x86_64_NVIDIA   = "${path.module}/../../templates/al2023_user_data.tpl"
+    AL2023_ARM_64_NVIDIA   = "${path.module}/../../templates/al2023_user_data.tpl"
+
+    BOTTLEROCKET_ARM_64        = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+    BOTTLEROCKET_x86_64        = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+    BOTTLEROCKET_ARM_64_FIPS   = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+    BOTTLEROCKET_x86_64_FIPS   = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+    BOTTLEROCKET_ARM_64_NVIDIA = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+    BOTTLEROCKET_x86_64_NVIDIA = "${path.module}/../../templates/bottlerocket_user_data.tpl"
+
+    WINDOWS_CORE_2019_x86_64 = "${path.module}/../../templates/windows_user_data.tpl"
+    WINDOWS_FULL_2019_x86_64 = "${path.module}/../../templates/windows_user_data.tpl"
+    WINDOWS_CORE_2022_x86_64 = "${path.module}/../../templates/windows_user_data.tpl"
+    WINDOWS_FULL_2022_x86_64 = "${path.module}/../../templates/windows_user_data.tpl"
+
+    CUSTOM = var.user_data_template_path
+  }
+  user_data_path = coalesce(var.user_data_template_path, local.ami_type_to_user_data_path[var.ami_type])
+
+  cluster_dns_ips = flatten(concat([try(cidrhost(var.cluster_service_cidr, 10), "")], var.additional_cluster_dns_ips))
+
+  user_data = var.create ? base64encode(templatefile(local.user_data_path,
+    {
+      # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+      enable_bootstrap_user_data = var.enable_bootstrap_user_data
+
+      # Required to bootstrap node
+      cluster_name        = var.cluster_name
+      cluster_endpoint    = var.cluster_endpoint
+      cluster_auth_base64 = var.cluster_auth_base64
+
+      cluster_service_cidr = var.cluster_service_cidr
+      cluster_ip_family    = var.cluster_ip_family
+
+      # Bottlerocket
+      cluster_dns_ips = "[${join(", ", formatlist("\"%s\"", local.cluster_dns_ips))}]"
+
+      # Optional
+      bootstrap_extra_args     = var.bootstrap_extra_args
+      pre_bootstrap_user_data  = var.pre_bootstrap_user_data
+      post_bootstrap_user_data = var.post_bootstrap_user_data
+    }
+  )) : ""
+
+  user_data_type_to_rendered = try(coalesce(
+    local.is_al2 ? try(data.cloudinit_config.al2_eks_managed_node_group[0].rendered, local.user_data) : null,
+    local.is_al2023 ? try(data.cloudinit_config.al2023_eks_managed_node_group[0].rendered, local.user_data) : null,
+    local.user_data,
+  ), "")
+}
+
+# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
+# Managed node group data must in MIME multi-part archive format,
+# as by default, EKS will merge the bootstrapping command required for nodes to join the
+# cluster with your user data. If you use a custom AMI in your launch template,
+# this merging will NOT happen and you are responsible for nodes joining the cluster.
+# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data
+
+data "cloudinit_config" "al2_eks_managed_node_group" {
+  count = var.create && local.is_al2 && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0
+
+  base64_encode = true
+  gzip          = false
+  boundary      = "//"
+
+  # Prepend to existing user data supplied by AWS EKS
+  part {
+    content      = var.pre_bootstrap_user_data
+    content_type = "text/x-shellscript"
+  }
+}
+
+# Scenarios:
+#
+# 1. Do nothing - provide nothing
+# 2. Prepend stuff on EKS MNG (before EKS MNG adds its bit at the end)
+# 3. Own all of the stuff on self-MNG or EKS MNG w/ custom AMI
+
+locals {
+  nodeadm_cloudinit = var.enable_bootstrap_user_data ? concat(
+    var.cloudinit_pre_nodeadm,
+    [{
+      content_type = "application/node.eks.aws"
+      content      = base64decode(local.user_data)
+    }],
+    var.cloudinit_post_nodeadm
+  ) : var.cloudinit_pre_nodeadm
+}
+
+data "cloudinit_config" "al2023_eks_managed_node_group" {
+  count = var.create && local.is_al2023 && length(local.nodeadm_cloudinit) > 0 ? 1 : 0
+
+  base64_encode = true
+  gzip          = false
+  boundary      = "MIMEBOUNDARY"
+
+  dynamic "part" {
+    # Using the index is fine in this context since any change in user data will be a replacement
+    for_each = { for i, v in local.nodeadm_cloudinit : i => v }
+
+    content {
+      content      = part.value.content
+      content_type = try(part.value.content_type, null)
+      filename     = try(part.value.filename, null)
+      merge_type   = try(part.value.merge_type, null)
+    }
+  }
+}
diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf
new file mode 100644
index 0000000000..dda4b5195d
--- /dev/null
+++ b/modules/_user_data/outputs.tf
@@ -0,0 +1,4 @@
+output "user_data" {
+  description = "Base64 encoded user data rendered for the provided inputs"
+  value       = local.user_data_type_to_rendered
+}
diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf
new file mode 100644
index 0000000000..bfc32ab688
--- /dev/null
+++ b/modules/_user_data/variables.tf
@@ -0,0 +1,121 @@
+variable "create" {
+  description = "Determines whether to create user-data or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "ami_type" {
+  description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values"
+  type        = string
+  default     = "AL2023_x86_64_STANDARD"
+  nullable    = false
+}
+
+variable "enable_bootstrap_user_data" {
+  description = "Determines whether the bootstrap configurations are populated within the user data template"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "is_eks_managed_node_group" {
+  description = "Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "cluster_name" {
+  description = "Name of the EKS cluster"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "cluster_endpoint" {
+  description = "Endpoint of associated EKS cluster"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "cluster_auth_base64" {
+  description = "Base64 encoded CA of associated EKS cluster"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "cluster_service_cidr" {
+  description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "cluster_ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`"
+  type        = string
+  default     = "ipv4"
+  nullable    = false
+}
+
+variable "additional_cluster_dns_ips" {
+  description = "Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "pre_bootstrap_user_data" {
+  description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "post_bootstrap_user_data" {
+  description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "bootstrap_extra_args" {
+  description = "Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "user_data_template_path" {
+  description = "Path to a local, custom user data template file to use when rendering user data"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "cloudinit_pre_nodeadm" {
+  description = "Array of cloud-init document parts that are created before the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default  = []
+  nullable = false
+}
+
+variable "cloudinit_post_nodeadm" {
+  description = "Array of cloud-init document parts that are created after the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default  = []
+  nullable = false
+}
diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf
new file mode 100644
index 0000000000..a9802b0dea
--- /dev/null
+++ b/modules/_user_data/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    cloudinit = {
+      source  = "hashicorp/cloudinit"
+      version = ">= 2.0"
+    }
+    null = {
+      source  = "hashicorp/null"
+      version = ">= 3.0"
+    }
+  }
+}
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
new file mode 100644
index 0000000000..05eacc433c
--- /dev/null
+++ b/modules/eks-managed-node-group/README.md
@@ -0,0 +1,221 @@
+# EKS Managed Node Group Module
+
+Configuration in this directory creates an EKS Managed Node Group along with an IAM role, security group, and launch template
+
+## Usage
+
+```hcl
+module "eks_managed_node_group" {
+  source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+
+  name            = "separate-eks-mng"
+  cluster_name    = "my-cluster"
+  cluster_version = "1.31"
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+  // The following variables are necessary if you decide to use the module outside of the parent EKS module context.
+  // Without them, the nodes' security groups are empty and the nodes won't join the cluster.
+  cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id
+  vpc_security_group_ids            = [module.eks.node_security_group_id]
+
+  // Note: `disk_size` and `remote_access` can only be set when using the EKS managed node group default launch template
+  // This module defaults to providing a custom launch template to allow for custom security groups, tag propagation, etc.
+  // use_custom_launch_template = false
+  // disk_size = 50
+  //
+  //  # Remote access cannot be specified with a launch template
+  //  remote_access = {
+  //    ec2_ssh_key               = module.key_pair.key_pair_name
+  //    source_security_group_ids = [aws_security_group.remote_access.id]
+  //  }
+
+  min_size     = 1
+  max_size     = 10
+  desired_size = 1
+
+  instance_types = ["t3.large"]
+  capacity_type  = "SPOT"
+
+  labels = {
+    Environment = "test"
+    GithubRepo  = "terraform-aws-eks"
+    GithubOrg   = "terraform-aws-modules"
+  }
+
+  taints = {
+    dedicated = {
+      key    = "dedicated"
+      value  = "gpuGroup"
+      effect = "NO_SCHEDULE"
+    }
+  }
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_user_data"></a> [user\_data](#module\_user\_data) | ../_user_data | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource |
+| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_placement_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/placement_group) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_vpc_security_group_egress_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource |
+| [aws_vpc_security_group_ingress_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_ec2_instance_type.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type) | data source |
+| [aws_eks_cluster_versions.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_versions) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [aws_ssm_parameter.ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+| [aws_subnet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_account_id"></a> [account\_id](#input\_account\_id) | The AWS account ID - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_ami_id"></a> [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `""` | no |
+| <a name="input_ami_release_version"></a> [ami\_release\_version](#input\_ami\_release\_version) | The AMI version. Defaults to latest AMI release version for the given Kubernetes version and AMI type | `string` | `null` | no |
+| <a name="input_ami_type"></a> [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `"AL2023_x86_64_STANDARD"` | no |
+| <a name="input_block_device_mappings"></a> [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | <pre>map(object({<br/>    device_name = optional(string)<br/>    ebs = optional(object({<br/>      delete_on_termination      = optional(bool)<br/>      encrypted                  = optional(bool)<br/>      iops                       = optional(number)<br/>      kms_key_id                 = optional(string)<br/>      snapshot_id                = optional(string)<br/>      throughput                 = optional(number)<br/>      volume_initialization_rate = optional(number)<br/>      volume_size                = optional(number)<br/>      volume_type                = optional(string)<br/>    }))<br/>    no_device    = optional(string)<br/>    virtual_name = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_bootstrap_extra_args"></a> [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `null` | no |
+| <a name="input_capacity_reservation_specification"></a> [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | <pre>object({<br/>    capacity_reservation_preference = optional(string)<br/>    capacity_reservation_target = optional(object({<br/>      capacity_reservation_id                 = optional(string)<br/>      capacity_reservation_resource_group_arn = optional(string)<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_capacity_type"></a> [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no |
+| <a name="input_cloudinit_post_nodeadm"></a> [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_cloudinit_pre_nodeadm"></a> [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_cluster_auth_base64"></a> [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `null` | no |
+| <a name="input_cluster_endpoint"></a> [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
+| <a name="input_cluster_ip_family"></a> [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no |
+| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no |
+| <a name="input_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service | `string` | `null` | no |
+| <a name="input_cluster_service_cidr"></a> [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself | `string` | `null` | no |
+| <a name="input_cpu_options"></a> [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | <pre>object({<br/>    amd_sev_snp      = optional(string)<br/>    core_count       = optional(number)<br/>    threads_per_core = optional(number)<br/>  })</pre> | `null` | no |
+| <a name="input_create"></a> [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
+| <a name="input_create_iam_role"></a> [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| <a name="input_create_iam_role_policy"></a> [create\_iam\_role\_policy](#input\_create\_iam\_role\_policy) | Determines whether an IAM role policy is created or not | `bool` | `true` | no |
+| <a name="input_create_launch_template"></a> [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
+| <a name="input_create_placement_group"></a> [create\_placement\_group](#input\_create\_placement\_group) | Determines whether a placement group is created & used by the node group | `bool` | `false` | no |
+| <a name="input_create_security_group"></a> [create\_security\_group](#input\_create\_security\_group) | Determines if a security group is created | `bool` | `true` | no |
+| <a name="input_credit_specification"></a> [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | <pre>object({<br/>    cpu_credits = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_desired_size"></a> [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
+| <a name="input_disable_api_termination"></a> [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| <a name="input_disk_size"></a> [disk\_size](#input\_disk\_size) | Disk size in GiB for nodes. Defaults to `20`. Only valid when `use_custom_launch_template` = `false` | `number` | `null` | no |
+| <a name="input_ebs_optimized"></a> [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance(s) will be EBS-optimized | `bool` | `null` | no |
+| <a name="input_efa_indices"></a> [efa\_indices](#input\_efa\_indices) | The indices of the network interfaces that should be EFA-enabled. Only valid when `enable_efa_support` = `true` | `list(number)` | <pre>[<br/>  0<br/>]</pre> | no |
+| <a name="input_enable_bootstrap_user_data"></a> [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template. Only valid when using a custom AMI via `ami_id` | `bool` | `false` | no |
+| <a name="input_enable_efa_only"></a> [enable\_efa\_only](#input\_enable\_efa\_only) | Determines whether to enable EFA (`false`, default) or EFA and EFA-only (`true`) network interfaces. Note: requires vpc-cni version `v1.18.4` or later | `bool` | `true` | no |
+| <a name="input_enable_efa_support"></a> [enable\_efa\_support](#input\_enable\_efa\_support) | Determines whether to enable Elastic Fabric Adapter (EFA) support | `bool` | `false` | no |
+| <a name="input_enable_monitoring"></a> [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `false` | no |
+| <a name="input_enclave_options"></a> [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | <pre>object({<br/>    enabled = optional(bool)<br/>  })</pre> | `null` | no |
+| <a name="input_force_update_version"></a> [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no |
+| <a name="input_iam_role_additional_policies"></a> [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
+| <a name="input_iam_role_arn"></a> [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| <a name="input_iam_role_attach_cni_policy"></a> [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| <a name="input_iam_role_description"></a> [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `"EKS managed node group IAM role"` | no |
+| <a name="input_iam_role_name"></a> [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| <a name="input_iam_role_path"></a> [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| <a name="input_iam_role_permissions_boundary"></a> [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| <a name="input_iam_role_policy_statements"></a> [iam\_role\_policy\_statements](#input\_iam\_role\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_instance_market_options"></a> [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | <pre>object({<br/>    market_type = optional(string)<br/>    spot_options = optional(object({<br/>      block_duration_minutes         = optional(number)<br/>      instance_interruption_behavior = optional(string)<br/>      max_price                      = optional(string)<br/>      spot_instance_type             = optional(string)<br/>      valid_until                    = optional(string)<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_instance_types"></a> [instance\_types](#input\_instance\_types) | Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]` | `list(string)` | `null` | no |
+| <a name="input_kernel_id"></a> [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| <a name="input_key_name"></a> [key\_name](#input\_key\_name) | The key name that should be used for the instance(s) | `string` | `null` | no |
+| <a name="input_kubernetes_version"></a> [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version. Defaults to EKS Cluster Kubernetes version | `string` | `null` | no |
+| <a name="input_labels"></a> [labels](#input\_labels) | Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed | `map(string)` | `null` | no |
+| <a name="input_launch_template_default_version"></a> [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default version of the launch template | `string` | `null` | no |
+| <a name="input_launch_template_description"></a> [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
+| <a name="input_launch_template_id"></a> [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true` | `string` | `""` | no |
+| <a name="input_launch_template_name"></a> [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no |
+| <a name="input_launch_template_tags"></a> [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no |
+| <a name="input_launch_template_use_name_prefix"></a> [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| <a name="input_launch_template_version"></a> [launch\_template\_version](#input\_launch\_template\_version) | Launch template version number. The default is `$Default` | `string` | `null` | no |
+| <a name="input_license_specifications"></a> [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | <pre>list(object({<br/>    license_configuration_arn = string<br/>  }))</pre> | `null` | no |
+| <a name="input_maintenance_options"></a> [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | <pre>object({<br/>    auto_recovery = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_max_size"></a> [max\_size](#input\_max\_size) | Maximum number of instances/nodes | `number` | `3` | no |
+| <a name="input_metadata_options"></a> [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | <pre>object({<br/>    http_endpoint               = optional(string, "enabled")<br/>    http_protocol_ipv6          = optional(string)<br/>    http_put_response_hop_limit = optional(number, 1)<br/>    http_tokens                 = optional(string, "required")<br/>    instance_metadata_tags      = optional(string)<br/>  })</pre> | <pre>{<br/>  "http_endpoint": "enabled",<br/>  "http_put_response_hop_limit": 1,<br/>  "http_tokens": "required"<br/>}</pre> | no |
+| <a name="input_min_size"></a> [min\_size](#input\_min\_size) | Minimum number of instances/nodes | `number` | `1` | no |
+| <a name="input_name"></a> [name](#input\_name) | Name of the EKS managed node group | `string` | `""` | no |
+| <a name="input_network_interfaces"></a> [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | <pre>list(object({<br/>    associate_carrier_ip_address = optional(bool)<br/>    associate_public_ip_address  = optional(bool)<br/>    connection_tracking_specification = optional(object({<br/>      tcp_established_timeout = optional(number)<br/>      udp_stream_timeout      = optional(number)<br/>      udp_timeout             = optional(number)<br/>    }))<br/>    delete_on_termination = optional(bool)<br/>    description           = optional(string)<br/>    device_index          = optional(number)<br/>    ena_srd_specification = optional(object({<br/>      ena_srd_enabled = optional(bool)<br/>      ena_srd_udp_specification = optional(object({<br/>        ena_srd_udp_enabled = optional(bool)<br/>      }))<br/>    }))<br/>    interface_type       = optional(string)<br/>    ipv4_address_count   = optional(number)<br/>    ipv4_addresses       = optional(list(string))<br/>    ipv4_prefix_count    = optional(number)<br/>    ipv4_prefixes        = optional(list(string))<br/>    ipv6_address_count   = optional(number)<br/>    ipv6_addresses       = optional(list(string))<br/>    ipv6_prefix_count    = optional(number)<br/>    ipv6_prefixes        = optional(list(string))<br/>    network_card_index   = optional(number)<br/>    network_interface_id = optional(string)<br/>    primary_ipv6         = optional(bool)<br/>    private_ip_address   = optional(string)<br/>    security_groups      = optional(list(string), [])<br/>    subnet_id            = optional(string)<br/>  }))</pre> | `[]` | no |
+| <a name="input_node_repair_config"></a> [node\_repair\_config](#input\_node\_repair\_config) | The node auto repair configuration for the node group | <pre>object({<br/>    enabled = optional(bool, true)<br/>  })</pre> | `null` | no |
+| <a name="input_partition"></a> [partition](#input\_partition) | The AWS partition - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_placement"></a> [placement](#input\_placement) | The placement of the instance | <pre>object({<br/>    affinity                = optional(string)<br/>    availability_zone       = optional(string)<br/>    group_name              = optional(string)<br/>    host_id                 = optional(string)<br/>    host_resource_group_arn = optional(string)<br/>    partition_number        = optional(number)<br/>    spread_domain           = optional(string)<br/>    tenancy                 = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_post_bootstrap_user_data"></a> [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `null` | no |
+| <a name="input_pre_bootstrap_user_data"></a> [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `null` | no |
+| <a name="input_private_dns_name_options"></a> [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | <pre>object({<br/>    enable_resource_name_dns_aaaa_record = optional(bool)<br/>    enable_resource_name_dns_a_record    = optional(bool)<br/>    hostname_type                        = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_ram_disk_id"></a> [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
+| <a name="input_region"></a> [region](#input\_region) | Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration | `string` | `null` | no |
+| <a name="input_remote_access"></a> [remote\_access](#input\_remote\_access) | Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false` | <pre>object({<br/>    ec2_ssh_key               = optional(string)<br/>    source_security_group_ids = optional(list(string))<br/>  })</pre> | `null` | no |
+| <a name="input_security_group_description"></a> [security\_group\_description](#input\_security\_group\_description) | Description of the security group created | `string` | `null` | no |
+| <a name="input_security_group_egress_rules"></a> [security\_group\_egress\_rules](#input\_security\_group\_egress\_rules) | Security group egress rules to add to the security group created | <pre>map(object({<br/>    name = optional(string)<br/><br/>    cidr_ipv4                    = optional(string)<br/>    cidr_ipv6                    = optional(string)<br/>    description                  = optional(string)<br/>    from_port                    = optional(string)<br/>    ip_protocol                  = optional(string, "tcp")<br/>    prefix_list_id               = optional(string)<br/>    referenced_security_group_id = optional(string)<br/>    self                         = optional(bool, false)<br/>    tags                         = optional(map(string), {})<br/>    to_port                      = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_security_group_ingress_rules"></a> [security\_group\_ingress\_rules](#input\_security\_group\_ingress\_rules) | Security group ingress rules to add to the security group created | <pre>map(object({<br/>    name = optional(string)<br/><br/>    cidr_ipv4                    = optional(string)<br/>    cidr_ipv6                    = optional(string)<br/>    description                  = optional(string)<br/>    from_port                    = optional(string)<br/>    ip_protocol                  = optional(string, "tcp")<br/>    prefix_list_id               = optional(string)<br/>    referenced_security_group_id = optional(string)<br/>    self                         = optional(bool, false)<br/>    tags                         = optional(map(string), {})<br/>    to_port                      = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_security_group_name"></a> [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
+| <a name="input_security_group_tags"></a> [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
+| <a name="input_security_group_use_name_prefix"></a> [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_subnet_ids"></a> [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
+| <a name="input_tag_specifications"></a> [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(string)` | <pre>[<br/>  "instance",<br/>  "volume",<br/>  "network-interface"<br/>]</pre> | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| <a name="input_taints"></a> [taints](#input\_taints) | The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group | <pre>map(object({<br/>    key    = string<br/>    value  = optional(string)<br/>    effect = string<br/>  }))</pre> | `null` | no |
+| <a name="input_timeouts"></a> [timeouts](#input\_timeouts) | Create, update, and delete timeout configurations for the node group | <pre>object({<br/>    create = optional(string)<br/>    update = optional(string)<br/>    delete = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_update_config"></a> [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | <pre>object({<br/>    max_unavailable            = optional(number)<br/>    max_unavailable_percentage = optional(number)<br/>  })</pre> | <pre>{<br/>  "max_unavailable_percentage": 33<br/>}</pre> | no |
+| <a name="input_update_launch_template_default_version"></a> [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no |
+| <a name="input_use_custom_launch_template"></a> [use\_custom\_launch\_template](#input\_use\_custom\_launch\_template) | Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
+| <a name="input_use_latest_ami_release_version"></a> [use\_latest\_ami\_release\_version](#input\_use\_latest\_ami\_release\_version) | Determines whether to use the latest AMI release version for the given `ami_type` (except for `CUSTOM`). Note: `ami_type` and `kubernetes_version` must be supplied in order to enable this feature | `bool` | `true` | no |
+| <a name="input_use_name_prefix"></a> [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| <a name="input_user_data_template_path"></a> [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `null` | no |
+| <a name="input_vpc_security_group_ids"></a> [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_iam_role_arn"></a> [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| <a name="output_iam_role_name"></a> [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| <a name="output_iam_role_unique_id"></a> [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_launch_template_arn"></a> [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
+| <a name="output_launch_template_id"></a> [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
+| <a name="output_launch_template_latest_version"></a> [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+| <a name="output_launch_template_name"></a> [launch\_template\_name](#output\_launch\_template\_name) | The name of the launch template |
+| <a name="output_node_group_arn"></a> [node\_group\_arn](#output\_node\_group\_arn) | Amazon Resource Name (ARN) of the EKS Node Group |
+| <a name="output_node_group_autoscaling_group_names"></a> [node\_group\_autoscaling\_group\_names](#output\_node\_group\_autoscaling\_group\_names) | List of the autoscaling group names |
+| <a name="output_node_group_id"></a> [node\_group\_id](#output\_node\_group\_id) | EKS Cluster name and EKS Node Group name separated by a colon (`:`) |
+| <a name="output_node_group_labels"></a> [node\_group\_labels](#output\_node\_group\_labels) | Map of labels applied to the node group |
+| <a name="output_node_group_resources"></a> [node\_group\_resources](#output\_node\_group\_resources) | List of objects containing information about underlying resources |
+| <a name="output_node_group_status"></a> [node\_group\_status](#output\_node\_group\_status) | Status of the EKS Node Group |
+| <a name="output_node_group_taints"></a> [node\_group\_taints](#output\_node\_group\_taints) | List of objects containing information about taints applied to the node group |
+| <a name="output_security_group_arn"></a> [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
+| <a name="output_security_group_id"></a> [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
+<!-- END_TF_DOCS -->
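+
+A minimal sketch of calling this sub-module directly, assuming the registry source `terraform-aws-modules/eks/aws//modules/eks-managed-node-group`; all values shown are placeholders, and the cluster attributes (name, service CIDR, subnets) would normally come from the parent EKS module or data sources:
+
+```hcl
+module "eks_managed_node_group" {
+  source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
+
+  name         = "example"
+  cluster_name = "my-cluster"
+
+  # Derived from the cluster; used when rendering user data
+  cluster_service_cidr = "172.20.0.0/16"
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a"]
+
+  min_size     = 1
+  max_size     = 3
+  desired_size = 2
+
+  instance_types = ["m6i.large"]
+
+  tags = {
+    Environment = "dev"
+  }
+}
+```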
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
new file mode 100644
index 0000000000..d07c38633d
--- /dev/null
+++ b/modules/eks-managed-node-group/main.tf
@@ -0,0 +1,784 @@
+data "aws_partition" "current" {
+  count = var.create && var.partition == "" ? 1 : 0
+}
+data "aws_caller_identity" "current" {
+  count = var.create && var.account_id == "" ? 1 : 0
+}
+
+locals {
+  partition  = try(data.aws_partition.current[0].partition, var.partition)
+  account_id = try(data.aws_caller_identity.current[0].account_id, var.account_id)
+}
+
+################################################################################
+# User Data
+################################################################################
+
+module "user_data" {
+  source = "../_user_data"
+
+  create   = var.create
+  ami_type = var.ami_type
+
+  cluster_name         = var.cluster_name
+  cluster_endpoint     = var.cluster_endpoint
+  cluster_auth_base64  = var.cluster_auth_base64
+  cluster_ip_family    = var.cluster_ip_family
+  cluster_service_cidr = var.cluster_service_cidr
+
+  enable_bootstrap_user_data = var.enable_bootstrap_user_data
+  pre_bootstrap_user_data    = var.pre_bootstrap_user_data
+  post_bootstrap_user_data   = var.post_bootstrap_user_data
+  bootstrap_extra_args       = var.bootstrap_extra_args
+  user_data_template_path    = var.user_data_template_path
+
+  cloudinit_pre_nodeadm  = var.cloudinit_pre_nodeadm
+  cloudinit_post_nodeadm = var.cloudinit_post_nodeadm
+}
+
+################################################################################
+# EFA Support
+################################################################################
+
+data "aws_ec2_instance_type" "this" {
+  count = var.create && var.enable_efa_support ? 1 : 0
+
+  region = var.region
+
+  instance_type = local.efa_instance_type
+}
+
+locals {
+  enable_efa_support = var.create && var.enable_efa_support
+
+  efa_instance_type = try(element(var.instance_types, 0), "")
+  num_network_cards = try(data.aws_ec2_instance_type.this[0].maximum_network_cards, 0)
+
+  # Primary network interface must be EFA, remaining can be EFA or EFA-only
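+  # When `enable_efa_only` is true, only card 0 and the cards listed in `efa_indices` receive
+  # full "efa" interfaces; the remaining cards receive "efa-only" interfaces, which carry EFA
+  # traffic but no standard IP networking. When `false`, every card gets an "efa" interface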
+  efa_network_interfaces = [
+    for i in range(local.num_network_cards) : {
+      associate_public_ip_address = false
+      delete_on_termination       = true
+      device_index                = i == 0 ? 0 : 1
+      network_card_index          = i
+      interface_type              = var.enable_efa_only ? contains(concat([0], var.efa_indices), i) ? "efa" : "efa-only" : "efa"
+    }
+  ]
+
+  network_interfaces = local.enable_efa_support ? local.efa_network_interfaces : var.network_interfaces
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+locals {
+  launch_template_name = coalesce(var.launch_template_name, "${var.name}-eks-node-group")
+  security_group_ids   = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids, aws_security_group.this[*].id))
+}
+
+resource "aws_launch_template" "this" {
+  count = var.create && var.create_launch_template && var.use_custom_launch_template ? 1 : 0
+
+  region = var.region
+
+  dynamic "block_device_mappings" {
+    for_each = var.block_device_mappings != null ? var.block_device_mappings : {}
+
+    content {
+      device_name = block_device_mappings.value.device_name
+
+      dynamic "ebs" {
+        for_each = block_device_mappings.value.ebs != null ? [block_device_mappings.value.ebs] : []
+
+        content {
+          delete_on_termination      = ebs.value.delete_on_termination
+          encrypted                  = ebs.value.encrypted
+          iops                       = ebs.value.iops
+          kms_key_id                 = ebs.value.kms_key_id
+          snapshot_id                = ebs.value.snapshot_id
+          throughput                 = ebs.value.throughput
+          volume_initialization_rate = ebs.value.volume_initialization_rate
+          volume_size                = ebs.value.volume_size
+          volume_type                = ebs.value.volume_type
+        }
+      }
+
+      no_device    = block_device_mappings.value.no_device
+      virtual_name = block_device_mappings.value.virtual_name
+    }
+  }
+
+  dynamic "capacity_reservation_specification" {
+    for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
+
+    content {
+      capacity_reservation_preference = capacity_reservation_specification.value.capacity_reservation_preference
+
+      dynamic "capacity_reservation_target" {
+        for_each = capacity_reservation_specification.value.capacity_reservation_target != null ? [capacity_reservation_specification.value.capacity_reservation_target] : []
+        content {
+          capacity_reservation_id                 = capacity_reservation_target.value.capacity_reservation_id
+          capacity_reservation_resource_group_arn = capacity_reservation_target.value.capacity_reservation_resource_group_arn
+        }
+      }
+    }
+  }
+
+  dynamic "cpu_options" {
+    for_each = var.cpu_options != null ? [var.cpu_options] : []
+
+    content {
+      amd_sev_snp      = cpu_options.value.amd_sev_snp
+      core_count       = cpu_options.value.core_count
+      threads_per_core = cpu_options.value.threads_per_core
+    }
+  }
+
+  dynamic "credit_specification" {
+    for_each = var.credit_specification != null ? [var.credit_specification] : []
+
+    content {
+      cpu_credits = credit_specification.value.cpu_credits
+    }
+  }
+
+  default_version         = var.launch_template_default_version
+  description             = var.launch_template_description
+  disable_api_termination = var.disable_api_termination
+  ebs_optimized           = var.ebs_optimized
+
+  dynamic "enclave_options" {
+    for_each = var.enclave_options != null ? [var.enclave_options] : []
+
+    content {
+      enabled = enclave_options.value.enabled
+    }
+  }
+
+  # Set on EKS managed node group, will fail if set here
+  # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
+  # dynamic "hibernation_options" {
+  #   for_each = length(var.hibernation_options) > 0 ? [var.hibernation_options] : []
+
+  #   content {
+  #     configured = hibernation_options.value.configured
+  #   }
+  # }
+
+  # Set on EKS managed node group, will fail if set here
+  # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
+  # dynamic "iam_instance_profile" {
+  #   for_each = [var.iam_instance_profile]
+  #   content {
+  #     name = lookup(var.iam_instance_profile, "name", null)
+  #     arn  = lookup(var.iam_instance_profile, "arn", null)
+  #   }
+  # }
+
+  image_id = var.ami_id
+  # Set on EKS managed node group, will fail if set here
+  # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
+  # instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
+
+  dynamic "instance_market_options" {
+    for_each = var.instance_market_options != null ? [var.instance_market_options] : []
+
+    content {
+      market_type = instance_market_options.value.market_type
+
+      dynamic "spot_options" {
+        for_each = instance_market_options.value.spot_options != null ? [instance_market_options.value.spot_options] : []
+
+        content {
+          block_duration_minutes         = spot_options.value.block_duration_minutes
+          instance_interruption_behavior = spot_options.value.instance_interruption_behavior
+          max_price                      = spot_options.value.max_price
+          spot_instance_type             = spot_options.value.spot_instance_type
+          valid_until                    = spot_options.value.valid_until
+        }
+      }
+    }
+  }
+
+  # Instance type(s) are generally set on the node group,
+  # except when an ML capacity block reservation is used
+  instance_type = var.capacity_type == "CAPACITY_BLOCK" ? element(var.instance_types, 0) : null
+  kernel_id     = var.kernel_id
+  key_name      = var.key_name
+
+  dynamic "license_specification" {
+    for_each = var.license_specifications != null ? var.license_specifications : []
+
+    content {
+      license_configuration_arn = license_specification.value.license_configuration_arn
+    }
+  }
+
+  dynamic "maintenance_options" {
+    for_each = var.maintenance_options != null ? [var.maintenance_options] : []
+
+    content {
+      auto_recovery = maintenance_options.value.auto_recovery
+    }
+  }
+
+  dynamic "metadata_options" {
+    for_each = var.metadata_options != null ? [var.metadata_options] : []
+
+    content {
+      http_endpoint               = metadata_options.value.http_endpoint
+      http_protocol_ipv6          = metadata_options.value.http_protocol_ipv6
+      http_put_response_hop_limit = metadata_options.value.http_put_response_hop_limit
+      http_tokens                 = metadata_options.value.http_tokens
+      instance_metadata_tags      = metadata_options.value.instance_metadata_tags
+    }
+  }
+
+  dynamic "monitoring" {
+    for_each = var.enable_monitoring ? [1] : []
+
+    content {
+      enabled = var.enable_monitoring
+    }
+  }
+
+  name        = var.launch_template_use_name_prefix ? null : local.launch_template_name
+  name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null
+
+  dynamic "network_interfaces" {
+    for_each = length(var.network_interfaces) > 0 ? var.network_interfaces : []
+
+    content {
+      associate_carrier_ip_address = network_interfaces.value.associate_carrier_ip_address
+      associate_public_ip_address  = network_interfaces.value.associate_public_ip_address
+
+      dynamic "connection_tracking_specification" {
+        for_each = network_interfaces.value.connection_tracking_specification != null ? [network_interfaces.value.connection_tracking_specification] : []
+
+        content {
+          tcp_established_timeout = connection_tracking_specification.value.tcp_established_timeout
+          udp_stream_timeout      = connection_tracking_specification.value.udp_stream_timeout
+          udp_timeout             = connection_tracking_specification.value.udp_timeout
+        }
+      }
+
+      delete_on_termination = network_interfaces.value.delete_on_termination
+      description           = network_interfaces.value.description
+      device_index          = network_interfaces.value.device_index
+
+      dynamic "ena_srd_specification" {
+        for_each = network_interfaces.value.ena_srd_specification != null ? [network_interfaces.value.ena_srd_specification] : []
+
+        content {
+          ena_srd_enabled = ena_srd_specification.value.ena_srd_enabled
+
+          dynamic "ena_srd_udp_specification" {
+            for_each = ena_srd_specification.value.ena_srd_udp_specification != null ? [ena_srd_specification.value.ena_srd_udp_specification] : []
+
+            content {
+              ena_srd_udp_enabled = ena_srd_udp_specification.value.ena_srd_udp_enabled
+            }
+          }
+        }
+      }
+
+      interface_type       = network_interfaces.value.interface_type
+      ipv4_address_count   = network_interfaces.value.ipv4_address_count
+      ipv4_addresses       = network_interfaces.value.ipv4_addresses
+      ipv4_prefix_count    = network_interfaces.value.ipv4_prefix_count
+      ipv4_prefixes        = network_interfaces.value.ipv4_prefixes
+      ipv6_address_count   = network_interfaces.value.ipv6_address_count
+      ipv6_addresses       = network_interfaces.value.ipv6_addresses
+      ipv6_prefix_count    = network_interfaces.value.ipv6_prefix_count
+      ipv6_prefixes        = network_interfaces.value.ipv6_prefixes
+      network_card_index   = network_interfaces.value.network_card_index
+      network_interface_id = network_interfaces.value.network_interface_id
+      primary_ipv6         = network_interfaces.value.primary_ipv6
+      private_ip_address   = network_interfaces.value.private_ip_address
+      # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570
+      security_groups = compact(concat(network_interfaces.value.security_groups, var.vpc_security_group_ids))
+      # Set on EKS managed node group, will fail if set here
+      # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
+      # subnet_id       = try(network_interfaces.value.subnet_id, null)
+    }
+  }
+
+  dynamic "placement" {
+    for_each = var.placement != null || local.create_placement_group ? [var.placement] : []
+
+    content {
+      affinity                = try(placement.value.affinity, null)
+      availability_zone       = try(placement.value.availability_zone, null)
+      group_name              = try(aws_placement_group.this[0].name, placement.value.group_name)
+      host_id                 = try(placement.value.host_id, null)
+      host_resource_group_arn = try(placement.value.host_resource_group_arn, null)
+      partition_number        = try(placement.value.partition_number, null)
+      spread_domain           = try(placement.value.spread_domain, null)
+      tenancy                 = try(placement.value.tenancy, null)
+    }
+  }
+
+  dynamic "private_dns_name_options" {
+    for_each = var.private_dns_name_options != null ? [var.private_dns_name_options] : []
+
+    content {
+      enable_resource_name_dns_aaaa_record = private_dns_name_options.value.enable_resource_name_dns_aaaa_record
+      enable_resource_name_dns_a_record    = private_dns_name_options.value.enable_resource_name_dns_a_record
+      hostname_type                        = private_dns_name_options.value.hostname_type
+    }
+  }
+
+  ram_disk_id = var.ram_disk_id
+
+  dynamic "tag_specifications" {
+    for_each = toset(var.tag_specifications)
+
+    content {
+      resource_type = tag_specifications.key
+      tags          = merge(var.tags, { Name = var.name }, var.launch_template_tags)
+    }
+  }
+
+  update_default_version = var.update_launch_template_default_version
+  user_data              = module.user_data.user_data
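+  # Security groups cannot be set both at the instance level and on the network interfaces
+  # (see the provider issue referenced above), so they are only applied here when no
+  # network interfaces are defined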
+  vpc_security_group_ids = length(local.network_interfaces) > 0 ? [] : local.security_group_ids
+
+  tags = merge(
+    var.tags,
+    var.launch_template_tags,
+  )
+
+  # Prevent premature access of policies by pods that
+  # require permissions on create/destroy that depend on nodes
+  depends_on = [
+    aws_iam_role_policy_attachment.this,
+    aws_iam_role_policy_attachment.additional,
+  ]
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+################################################################################
+# AMI SSM Parameter
+################################################################################
+
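+# When `kubernetes_version` is not supplied, a cluster version still under standard support is
+# looked up and used to construct the SSM parameter path for the AMI release version below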
+data "aws_eks_cluster_versions" "this" {
+  count = var.create && var.kubernetes_version == null ? 1 : 0
+
+  region = var.region
+
+  cluster_type   = "eks"
+  version_status = "STANDARD_SUPPORT"
+}
+
+locals {
+  # Just to ensure templating doesn't fail when values are not provided
+  ssm_kubernetes_version = var.kubernetes_version != null ? var.kubernetes_version : try(data.aws_eks_cluster_versions.this[0].cluster_versions[0].cluster_version, "UNSPECIFIED")
+  ssm_ami_type           = var.ami_type != null ? var.ami_type : ""
+
+  # Map the AMI type to the respective SSM param path
+  ssm_ami_type_to_ssm_param = {
+    AL2_x86_64                 = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2/recommended/release_version"
+    AL2_x86_64_GPU             = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2-gpu/recommended/release_version"
+    AL2_ARM_64                 = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2-arm64/recommended/release_version"
+    CUSTOM                     = "NONE"
+    BOTTLEROCKET_ARM_64        = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}/arm64/latest/image_version"
+    BOTTLEROCKET_x86_64        = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}/x86_64/latest/image_version"
+    BOTTLEROCKET_ARM_64_FIPS   = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-fips/arm64/latest/image_version"
+    BOTTLEROCKET_x86_64_FIPS   = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-fips/x86_64/latest/image_version"
+    BOTTLEROCKET_ARM_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-nvidia/arm64/latest/image_version"
+    BOTTLEROCKET_x86_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-nvidia/x86_64/latest/image_version"
+    WINDOWS_CORE_2019_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-${local.ssm_kubernetes_version}"
+    WINDOWS_FULL_2019_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Full-EKS_Optimized-${local.ssm_kubernetes_version}"
+    WINDOWS_CORE_2022_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Core-EKS_Optimized-${local.ssm_kubernetes_version}"
+    WINDOWS_FULL_2022_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Full-EKS_Optimized-${local.ssm_kubernetes_version}"
+    AL2023_x86_64_STANDARD     = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/standard/recommended/release_version"
+    AL2023_ARM_64_STANDARD     = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/arm64/standard/recommended/release_version"
+    AL2023_x86_64_NEURON       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/neuron/recommended/release_version"
+    AL2023_x86_64_NVIDIA       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/nvidia/recommended/release_version"
+    AL2023_ARM_64_NVIDIA       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/arm64/nvidia/recommended/release_version"
+  }
+
+  # The Windows SSM params currently do not have a release version, so we have to get the full output JSON blob and parse out the release version
+  windows_latest_ami_release_version = var.create && var.use_latest_ami_release_version && startswith(local.ssm_ami_type, "WINDOWS") ? nonsensitive(jsondecode(data.aws_ssm_parameter.ami[0].value)["release_version"]) : null
+  # Based on the steps above, try to get an AMI release version - if not, `null` is returned
+  latest_ami_release_version = startswith(local.ssm_ami_type, "WINDOWS") ? local.windows_latest_ami_release_version : try(nonsensitive(data.aws_ssm_parameter.ami[0].value), null)
+}
+
+data "aws_ssm_parameter" "ami" {
+  count = var.create && var.use_latest_ami_release_version ? 1 : 0
+
+  region = var.region
+
+  name = local.ssm_ami_type_to_ssm_param[var.ami_type]
+}
+
+################################################################################
+# Node Group
+################################################################################
+
+locals {
+  launch_template_id = var.create && var.create_launch_template ? try(aws_launch_template.this[0].id, null) : var.launch_template_id
+  # Change order to allow users to set version priority before using defaults
+  launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
+}
+
+resource "aws_eks_node_group" "this" {
+  count = var.create ? 1 : 0
+
+  region = var.region
+
+  # Required
+  cluster_name  = var.cluster_name
+  node_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn
+  subnet_ids    = var.subnet_ids
+
+  scaling_config {
+    min_size     = var.min_size
+    max_size     = var.max_size
+    desired_size = var.desired_size
+  }
+
+  # Optional
+  node_group_name        = var.use_name_prefix ? null : var.name
+  node_group_name_prefix = var.use_name_prefix ? "${var.name}-" : null
+
+  # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
+  ami_type        = var.ami_id != "" ? null : var.ami_type
+  release_version = var.ami_id != "" ? null : var.use_latest_ami_release_version ? local.latest_ami_release_version : var.ami_release_version
+  version         = var.ami_id != "" ? null : var.kubernetes_version
+
+  capacity_type        = var.capacity_type
+  disk_size            = var.use_custom_launch_template ? null : var.disk_size # if using a custom LT, set disk size on custom LT or else it will error here
+  force_update_version = var.force_update_version
+  # ML capacity block reservation requires instance type to be set on the launch template
+  instance_types = var.capacity_type == "CAPACITY_BLOCK" ? null : var.instance_types
+  labels         = var.labels
+
+  dynamic "launch_template" {
+    for_each = var.use_custom_launch_template ? [1] : []
+
+    content {
+      id      = local.launch_template_id
+      version = local.launch_template_version
+    }
+  }
+
+  dynamic "remote_access" {
+    for_each = var.remote_access != null ? [var.remote_access] : []
+
+    content {
+      ec2_ssh_key               = remote_access.value.ec2_ssh_key
+      source_security_group_ids = remote_access.value.source_security_group_ids
+    }
+  }
+
+  dynamic "taint" {
+    for_each = var.taints != null ? var.taints : {}
+
+    content {
+      key    = taint.value.key
+      value  = taint.value.value
+      effect = taint.value.effect
+    }
+  }
+
+  dynamic "update_config" {
+    for_each = var.update_config != null ? [var.update_config] : []
+
+    content {
+      max_unavailable_percentage = update_config.value.max_unavailable_percentage
+      max_unavailable            = update_config.value.max_unavailable
+    }
+  }
+
+  dynamic "node_repair_config" {
+    for_each = var.node_repair_config != null ? [var.node_repair_config] : []
+
+    content {
+      enabled = node_repair_config.value.enabled
+    }
+  }
+
+  dynamic "timeouts" {
+    for_each = var.timeouts != null ? [var.timeouts] : []
+
+    content {
+      create = var.timeouts.create
+      update = var.timeouts.update
+      delete = var.timeouts.delete
+    }
+  }
+
+  lifecycle {
+    create_before_destroy = true
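+    # Ignore desired_size after initial creation so that scaling driven outside of Terraform
+    # (e.g. Cluster Autoscaler) is not reverted on subsequent applies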
+    ignore_changes = [
+      scaling_config[0].desired_size,
+    ]
+  }
+
+  tags = merge(
+    var.tags,
+    { Name = var.name }
+  )
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+locals {
+  create_iam_role = var.create && var.create_iam_role
+
+  iam_role_name          = coalesce(var.iam_role_name, "${var.name}-eks-node-group")
+  iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
+
+  ipv4_cni_policy = { for k, v in {
+    AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" }
+  ipv6_cni_policy = { for k, v in {
+    AmazonEKS_CNI_IPv6_Policy = "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" }
+}
+
+data "aws_iam_policy_document" "assume_role_policy" {
+  count = local.create_iam_role ? 1 : 0
+
+  statement {
+    sid     = "EKSNodeAssumeRole"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "this" {
+  count = local.create_iam_role ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.assume_role_policy[0].json
+  permissions_boundary  = var.iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
+resource "aws_iam_role_policy_attachment" "this" {
+  for_each = { for k, v in merge(
+    {
+      AmazonEKSWorkerNodePolicy          = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy"
+      AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly"
+    },
+    local.ipv4_cni_policy,
+    local.ipv6_cni_policy
+  ) : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "additional" {
+  for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+locals {
+  create_iam_role_policy = local.create_iam_role && var.create_iam_role_policy && var.iam_role_policy_statements != null
+}
+
+data "aws_iam_policy_document" "role" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  dynamic "statement" {
+    for_each = var.iam_role_policy_statements != null ? var.iam_role_policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
+
+resource "aws_iam_role_policy" "this" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  policy      = data.aws_iam_policy_document.role[0].json
+  role        = aws_iam_role.this[0].id
+}
+
+################################################################################
+# Placement Group
+################################################################################
+
+locals {
+  create_placement_group = var.create && (local.enable_efa_support || var.create_placement_group)
+}
+
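+# EFA benefits from instances being packed close together, so a cluster placement group is
+# created automatically when EFA support is enabled (or when explicitly requested via
+# `create_placement_group`)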
+resource "aws_placement_group" "this" {
+  count = local.create_placement_group ? 1 : 0
+
+  region = var.region
+
+  name     = "${var.cluster_name}-${var.name}"
+  strategy = "cluster"
+
+  tags = var.tags
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+locals {
+  create_security_group = var.create && var.create_security_group && length(merge(local.security_group_ingress_rules, local.security_group_egress_rules)) > 0
+  security_group_name   = coalesce(var.security_group_name, "${var.cluster_name}-${var.name}")
+
+  security_group_ingress_rules = merge({ for k, v in
+    {
+      all_self_efa = {
+        description = "Node to node EFA"
+        protocol    = "-1"
+        from_port   = 0
+        self        = true
+      }
+    } : k => v if var.enable_efa_support
+    },
+    var.security_group_ingress_rules
+  )
+  security_group_egress_rules = merge({ for k, v in
+    {
+      all_self_efa = {
+        description = "Node to node EFA"
+        protocol    = "-1"
+        to_port     = 0
+        self        = true
+      }
+    } : k => v if var.enable_efa_support
+    },
+    var.security_group_egress_rules
+  )
+}
+
+data "aws_subnet" "this" {
+  count = local.create_security_group ? 1 : 0
+
+  region = var.region
+
+  id = element(var.subnet_ids, 0)
+}
+
+resource "aws_security_group" "this" {
+  count = local.create_security_group ? 1 : 0
+
+  region = var.region
+
+  name        = var.security_group_use_name_prefix ? null : local.security_group_name
+  name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null
+  description = var.security_group_description
+  vpc_id      = data.aws_subnet.this[0].vpc_id
+
+  tags = merge(
+    var.tags,
+    { "Name" = local.security_group_name },
+    var.security_group_tags
+  )
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_vpc_security_group_ingress_rule" "this" {
+  for_each = { for k, v in local.security_group_ingress_rules : k => v if length(local.security_group_ingress_rules) > 0 && local.create_security_group }
+
+  region = var.region
+
+  cidr_ipv4                    = each.value.cidr_ipv4
+  cidr_ipv6                    = each.value.cidr_ipv6
+  description                  = each.value.description
+  from_port                    = each.value.from_port
+  ip_protocol                  = each.value.ip_protocol
+  prefix_list_id               = each.value.prefix_list_id
+  referenced_security_group_id = each.value.self ? aws_security_group.this[0].id : each.value.referenced_security_group_id
+  security_group_id            = aws_security_group.this[0].id
+  tags = merge(
+    var.tags,
+    var.security_group_tags,
+    { "Name" = coalesce(each.value.name, "${local.security_group_name}-${each.key}") },
+    each.value.tags
+  )
+  to_port = try(coalesce(each.value.to_port, each.value.from_port), null)
+}
+
+resource "aws_vpc_security_group_egress_rule" "this" {
+  for_each = { for k, v in local.security_group_egress_rules : k => v if length(local.security_group_egress_rules) > 0 && local.create_security_group }
+
+  region = var.region
+
+  cidr_ipv4                    = each.value.cidr_ipv4
+  cidr_ipv6                    = each.value.cidr_ipv6
+  description                  = each.value.description
+  from_port                    = try(coalesce(each.value.from_port, each.value.to_port), null)
+  ip_protocol                  = each.value.ip_protocol
+  prefix_list_id               = each.value.prefix_list_id
+  referenced_security_group_id = each.value.self ? aws_security_group.this[0].id : each.value.referenced_security_group_id
+  security_group_id            = aws_security_group.this[0].id
+  tags = merge(
+    var.tags,
+    var.security_group_tags,
+    { "Name" = coalesce(each.value.name, "${local.security_group_name}-${each.key}") },
+    each.value.tags
+  )
+  to_port = each.value.to_port
+}
diff --git a/modules/eks-managed-node-group/migrations.tf b/modules/eks-managed-node-group/migrations.tf
new file mode 100644
index 0000000000..5d51a7208a
--- /dev/null
+++ b/modules/eks-managed-node-group/migrations.tf
@@ -0,0 +1,20 @@
+################################################################################
+# Migrations: v20.7 -> v20.8
+################################################################################
+
+# Node IAM role policy attachment
+# Commercial partition only - `moved` does not allow multiple moves to the same target
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]
+}
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..a7d6fcf62b
--- /dev/null
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -0,0 +1,95 @@
+################################################################################
+# Launch template
+################################################################################
+
+output "launch_template_id" {
+  description = "The ID of the launch template"
+  value       = try(aws_launch_template.this[0].id, null)
+}
+
+output "launch_template_arn" {
+  description = "The ARN of the launch template"
+  value       = try(aws_launch_template.this[0].arn, null)
+}
+
+output "launch_template_latest_version" {
+  description = "The latest version of the launch template"
+  value       = try(aws_launch_template.this[0].latest_version, null)
+}
+
+output "launch_template_name" {
+  description = "The name of the launch template"
+  value       = try(aws_launch_template.this[0].name, null)
+}
+
+################################################################################
+# Node Group
+################################################################################
+
+output "node_group_arn" {
+  description = "Amazon Resource Name (ARN) of the EKS Node Group"
+  value       = try(aws_eks_node_group.this[0].arn, null)
+}
+
+output "node_group_id" {
+  description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)"
+  value       = try(aws_eks_node_group.this[0].id, null)
+}
+
+output "node_group_resources" {
+  description = "List of objects containing information about underlying resources"
+  value       = try(aws_eks_node_group.this[0].resources, null)
+}
+
+output "node_group_autoscaling_group_names" {
+  description = "List of the autoscaling group names"
+  value       = try(flatten(aws_eks_node_group.this[0].resources[*].autoscaling_groups[*].name), [])
+}
+
+output "node_group_status" {
+  description = "Status of the EKS Node Group"
+  value       = try(aws_eks_node_group.this[0].status, null)
+}
+
+output "node_group_labels" {
+  description = "Map of labels applied to the node group"
+  value       = try(aws_eks_node_group.this[0].labels, {})
+}
+
+output "node_group_taints" {
+  description = "List of objects containing information about taints applied to the node group"
+  value       = try(aws_eks_node_group.this[0].taint, [])
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+  description = "The name of the IAM role"
+  value       = try(aws_iam_role.this[0].name, null)
+}
+
+output "iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the IAM role"
+  value       = try(aws_iam_role.this[0].arn, var.iam_role_arn)
+}
+
+output "iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = try(aws_iam_role.this[0].unique_id, null)
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the security group"
+  value       = try(aws_security_group.this[0].arn, null)
+}
+
+output "security_group_id" {
+  description = "ID of the security group"
+  value       = try(aws_security_group.this[0].id, null)
+}
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
new file mode 100644
index 0000000000..6e65933e51
--- /dev/null
+++ b/modules/eks-managed-node-group/variables.tf
@@ -0,0 +1,780 @@
+variable "create" {
+  description = "Determines whether to create EKS managed node group or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
+}
+
+variable "region" {
+  description = "Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration"
+  type        = string
+  default     = null
+}
+
+variable "partition" {
+  description = "The AWS partition - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+variable "account_id" {
+  description = "The AWS account ID - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+################################################################################
+# User Data
+################################################################################
+
+variable "enable_bootstrap_user_data" {
+  description = "Determines whether the bootstrap configurations are populated within the user data template. Only valid when using a custom AMI via `ami_id`"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "cluster_name" {
+  description = "Name of associated EKS cluster"
+  type        = string
+  default     = ""
+}
+
+variable "cluster_endpoint" {
+  description = "Endpoint of associated EKS cluster"
+  type        = string
+  default     = null
+}
+
+variable "cluster_auth_base64" {
+  description = "Base64 encoded CA of associated EKS cluster"
+  type        = string
+  default     = null
+}
+
+variable "cluster_service_cidr" {
+  description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself"
+  type        = string
+  default     = null
+}
+
+variable "pre_bootstrap_user_data" {
+  description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = null
+}
+
+variable "post_bootstrap_user_data" {
+  description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = null
+}
+
+variable "bootstrap_extra_args" {
+  description = "Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
+  type        = string
+  default     = null
+}
+
+variable "user_data_template_path" {
+  description = "Path to a local, custom user data template file to use when rendering user data"
+  type        = string
+  default     = null
+}
+
+variable "cloudinit_pre_nodeadm" {
+  description = "Array of cloud-init document parts that are created before the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default = null
+}
+
+variable "cloudinit_post_nodeadm" {
+  description = "Array of cloud-init document parts that are created after the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default = null
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+variable "create_launch_template" {
+  description = "Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "use_custom_launch_template" {
+  description = "Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "launch_template_id" {
+  description = "The ID of an existing launch template to use. Required when `create_launch_template` = `false` and `use_custom_launch_template` = `true`"
+  type        = string
+  default     = ""
+}
+
+variable "launch_template_name" {
+  description = "Name of launch template to be created"
+  type        = string
+  default     = null
+}
+
+variable "launch_template_use_name_prefix" {
+  description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "launch_template_description" {
+  description = "Description of the launch template"
+  type        = string
+  default     = null
+}
+
+variable "ebs_optimized" {
+  description = "If true, the launched EC2 instance(s) will be EBS-optimized"
+  type        = bool
+  default     = null
+}
+
+variable "ami_id" {
+  description = "The AMI from which to launch the instance. If not supplied, EKS will use its own default image"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "key_name" {
+  description = "The key name that should be used for the instance(s)"
+  type        = string
+  default     = null
+}
+
+variable "vpc_security_group_ids" {
+  description = "A list of security group IDs to associate"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "cluster_primary_security_group_id" {
+  description = "The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service"
+  type        = string
+  default     = null
+}
+
+variable "launch_template_default_version" {
+  description = "Default version of the launch template"
+  type        = string
+  default     = null
+}
+
+variable "update_launch_template_default_version" {
+  description = "Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version`"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "disable_api_termination" {
+  description = "If true, enables EC2 instance termination protection"
+  type        = bool
+  default     = null
+}
+
+variable "kernel_id" {
+  description = "The kernel ID"
+  type        = string
+  default     = null
+}
+
+variable "ram_disk_id" {
+  description = "The ID of the ram disk"
+  type        = string
+  default     = null
+}
+
+variable "block_device_mappings" {
+  description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
+  type = map(object({
+    device_name = optional(string)
+    ebs = optional(object({
+      delete_on_termination      = optional(bool)
+      encrypted                  = optional(bool)
+      iops                       = optional(number)
+      kms_key_id                 = optional(string)
+      snapshot_id                = optional(string)
+      throughput                 = optional(number)
+      volume_initialization_rate = optional(number)
+      volume_size                = optional(number)
+      volume_type                = optional(string)
+    }))
+    no_device    = optional(string)
+    virtual_name = optional(string)
+  }))
+  default = null
+}
+
+variable "capacity_reservation_specification" {
+  description = "Targeting for EC2 capacity reservations"
+  type = object({
+    capacity_reservation_preference = optional(string)
+    capacity_reservation_target = optional(object({
+      capacity_reservation_id                 = optional(string)
+      capacity_reservation_resource_group_arn = optional(string)
+    }))
+  })
+  default = null
+}
+
+variable "cpu_options" {
+  description = "The CPU options for the instance"
+  type = object({
+    amd_sev_snp      = optional(string)
+    core_count       = optional(number)
+    threads_per_core = optional(number)
+  })
+  default = null
+}
+
+variable "credit_specification" {
+  description = "Customize the credit specification of the instance"
+  type = object({
+    cpu_credits = optional(string)
+  })
+  default = null
+}
+
+variable "enclave_options" {
+  description = "Enable Nitro Enclaves on launched instances"
+  type = object({
+    enabled = optional(bool)
+  })
+  default = null
+}
+
+variable "instance_market_options" {
+  description = "The market (purchasing) option for the instance"
+  type = object({
+    market_type = optional(string)
+    spot_options = optional(object({
+      block_duration_minutes         = optional(number)
+      instance_interruption_behavior = optional(string)
+      max_price                      = optional(string)
+      spot_instance_type             = optional(string)
+      valid_until                    = optional(string)
+    }))
+  })
+  default = null
+}
+
+variable "maintenance_options" {
+  description = "The maintenance options for the instance"
+  type = object({
+    auto_recovery = optional(string)
+  })
+  default = null
+}
+
+variable "license_specifications" {
+  description = "A list of license specifications to associate with"
+  type = list(object({
+    license_configuration_arn = string
+  }))
+  default = null
+}
+
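+# IMDSv2 is enforced by default (`http_tokens = "required"`) with a hop limit of 1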
+variable "metadata_options" {
+  description = "Customize the metadata options for the instance"
+  type = object({
+    http_endpoint               = optional(string, "enabled")
+    http_protocol_ipv6          = optional(string)
+    http_put_response_hop_limit = optional(number, 1)
+    http_tokens                 = optional(string, "required")
+    instance_metadata_tags      = optional(string)
+  })
+  default = {
+    http_endpoint               = "enabled"
+    http_put_response_hop_limit = 1
+    http_tokens                 = "required"
+  }
+  nullable = false
+}
+
+variable "enable_monitoring" {
+  description = "Enables/disables detailed monitoring"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "enable_efa_support" {
+  description = "Determines whether to enable Elastic Fabric Adapter (EFA) support"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "enable_efa_only" {
+  description = "Determines whether to enable EFA (`false`, default) or EFA and EFA-only (`true`) network interfaces. Note: requires vpc-cni version `v1.18.4` or later"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "efa_indices" {
+  description = "The indices of the network interfaces that should be EFA-enabled. Only valid when `enable_efa_support` = `true`"
+  type        = list(number)
+  default     = [0]
+  nullable    = false
+}
+
+variable "network_interfaces" {
+  description = "Customize network interfaces to be attached at instance boot time"
+  type = list(object({
+    associate_carrier_ip_address = optional(bool)
+    associate_public_ip_address  = optional(bool)
+    connection_tracking_specification = optional(object({
+      tcp_established_timeout = optional(number)
+      udp_stream_timeout      = optional(number)
+      udp_timeout             = optional(number)
+    }))
+    delete_on_termination = optional(bool)
+    description           = optional(string)
+    device_index          = optional(number)
+    ena_srd_specification = optional(object({
+      ena_srd_enabled = optional(bool)
+      ena_srd_udp_specification = optional(object({
+        ena_srd_udp_enabled = optional(bool)
+      }))
+    }))
+    interface_type       = optional(string)
+    ipv4_address_count   = optional(number)
+    ipv4_addresses       = optional(list(string))
+    ipv4_prefix_count    = optional(number)
+    ipv4_prefixes        = optional(list(string))
+    ipv6_address_count   = optional(number)
+    ipv6_addresses       = optional(list(string))
+    ipv6_prefix_count    = optional(number)
+    ipv6_prefixes        = optional(list(string))
+    network_card_index   = optional(number)
+    network_interface_id = optional(string)
+    primary_ipv6         = optional(bool)
+    private_ip_address   = optional(string)
+    security_groups      = optional(list(string), [])
+    subnet_id            = optional(string)
+  }))
+  default  = []
+  nullable = false
+}
+
+variable "placement" {
+  description = "The placement of the instance"
+  type = object({
+    affinity                = optional(string)
+    availability_zone       = optional(string)
+    group_name              = optional(string)
+    host_id                 = optional(string)
+    host_resource_group_arn = optional(string)
+    partition_number        = optional(number)
+    spread_domain           = optional(string)
+    tenancy                 = optional(string)
+  })
+  default = null
+}
+
+variable "create_placement_group" {
+  description = "Determines whether a placement group is created & used by the node group"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "private_dns_name_options" {
+  description = "The options for the instance hostname. The default values are inherited from the subnet"
+  type = object({
+    enable_resource_name_dns_aaaa_record = optional(bool)
+    enable_resource_name_dns_a_record    = optional(bool)
+    hostname_type                        = optional(string)
+  })
+  default = null
+}
+
+variable "launch_template_tags" {
+  description = "A map of additional tags to add to the tag_specifications of launch template created"
+  type        = map(string)
+  default     = {}
+}
+
+variable "tag_specifications" {
+  description = "The tags to apply to the resources during launch"
+  type        = list(string)
+  default     = ["instance", "volume", "network-interface"]
+  nullable    = false
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+variable "subnet_ids" {
+  description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`"
+  type        = list(string)
+  default     = null
+}
+
+variable "min_size" {
+  description = "Minimum number of instances/nodes"
+  type        = number
+  default     = 1
+  nullable    = false
+}
+
+variable "max_size" {
+  description = "Maximum number of instances/nodes"
+  type        = number
+  default     = 3
+  nullable    = false
+}
+
+variable "desired_size" {
+  description = "Desired number of instances/nodes"
+  type        = number
+  default     = 1
+  nullable    = false
+}
+
+variable "name" {
+  description = "Name of the EKS managed node group"
+  type        = string
+  default     = ""
+}
+
+variable "use_name_prefix" {
+  description = "Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "ami_type" {
+  description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values"
+  type        = string
+  default     = "AL2023_x86_64_STANDARD"
+  nullable    = false
+}
+
+variable "ami_release_version" {
+  description = "The AMI version. Defaults to latest AMI release version for the given Kubernetes version and AMI type"
+  type        = string
+  default     = null
+}
+
+variable "use_latest_ami_release_version" {
+  description = "Determines whether to use the latest AMI release version for the given `ami_type` (except for `CUSTOM`). Note: `ami_type` and `kubernetes_version` must be supplied in order to enable this feature"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "capacity_type" {
+  description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`"
+  type        = string
+  default     = "ON_DEMAND"
+  nullable    = false
+}
+
+variable "disk_size" {
+  description = "Disk size in GiB for nodes. Defaults to `20`. Only valid when `use_custom_launch_template` = `false`"
+  type        = number
+  default     = null
+}
+
+variable "force_update_version" {
+  description = "Force version update if existing pods are unable to be drained due to a pod disruption budget issue"
+  type        = bool
+  default     = null
+}
+
+variable "instance_types" {
+  description = "Set of instance types associated with the EKS Node Group. Defaults to `[\"t3.medium\"]`"
+  type        = list(string)
+  default     = null
+}
+
+variable "labels" {
+  description = "Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed"
+  type        = map(string)
+  default     = null
+}
+
+variable "kubernetes_version" {
+  description = "Kubernetes version. Defaults to EKS Cluster Kubernetes version"
+  type        = string
+  default     = null
+}
+
+variable "launch_template_version" {
+  description = "Launch template version number. The default is `$Default`"
+  type        = string
+  default     = null
+}
+
+variable "remote_access" {
+  description = "Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false`"
+  type = object({
+    ec2_ssh_key               = optional(string)
+    source_security_group_ids = optional(list(string))
+  })
+  default = null
+}
+
+variable "taints" {
+  description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group"
+  type = map(object({
+    key    = string
+    value  = optional(string)
+    effect = string
+  }))
+  default = null
+}
+
+variable "update_config" {
+  description = "Configuration block of settings for max unavailable resources during node group updates"
+  type = object({
+    max_unavailable            = optional(number)
+    max_unavailable_percentage = optional(number)
+  })
+  default = {
+    max_unavailable_percentage = 33
+  }
+  nullable = false
+}
+
+variable "node_repair_config" {
+  description = "The node auto repair configuration for the node group"
+  type = object({
+    enabled = optional(bool, true)
+  })
+  default = null
+}
+
+variable "timeouts" {
+  description = "Create, update, and delete timeout configurations for the node group"
+  type = object({
+    create = optional(string)
+    update = optional(string)
+    delete = optional(string)
+  })
+  default = null
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_role" {
+  description = "Determines whether an IAM role is created or to use an existing IAM role"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "cluster_ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`"
+  type        = string
+  default     = "ipv4"
+  nullable    = false
+}
+
+variable "iam_role_arn" {
+  description = "Existing IAM role ARN for the node group. Required if `create_iam_role` is set to `false`"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_name" {
+  description = "Name to use on IAM role created"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_use_name_prefix" {
+  description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_path" {
+  description = "IAM role path"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_description" {
+  description = "Description of the role"
+  type        = string
+  default     = "EKS managed node group IAM role"
+  nullable    = false
+}
+
+variable "iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_attach_cni_policy" {
+  description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_additional_policies" {
+  description = "Additional policies to be added to the IAM role"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+variable "iam_role_tags" {
+  description = "A map of additional tags to add to the IAM role created"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+variable "create_iam_role_policy" {
+  description = "Determines whether an IAM role policy is created or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+variable "create_security_group" {
+  description = "Determines if a security group is created"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "security_group_name" {
+  description = "Name to use on security group created"
+  type        = string
+  default     = null
+}
+
+variable "security_group_use_name_prefix" {
+  description = "Determines whether the security group name (`security_group_name`) is used as a prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "security_group_description" {
+  description = "Description of the security group created"
+  type        = string
+  default     = null
+}
+
+variable "security_group_ingress_rules" {
+  description = "Security group ingress rules to add to the security group created"
+  type = map(object({
+    name = optional(string)
+
+    cidr_ipv4                    = optional(string)
+    cidr_ipv6                    = optional(string)
+    description                  = optional(string)
+    from_port                    = optional(string)
+    ip_protocol                  = optional(string, "tcp")
+    prefix_list_id               = optional(string)
+    referenced_security_group_id = optional(string)
+    self                         = optional(bool, false)
+    tags                         = optional(map(string), {})
+    to_port                      = optional(string)
+  }))
+  default = {}
+}
+
+variable "security_group_egress_rules" {
+  description = "Security group egress rules to add to the security group created"
+  type = map(object({
+    name = optional(string)
+
+    cidr_ipv4                    = optional(string)
+    cidr_ipv6                    = optional(string)
+    description                  = optional(string)
+    from_port                    = optional(string)
+    ip_protocol                  = optional(string, "tcp")
+    prefix_list_id               = optional(string)
+    referenced_security_group_id = optional(string)
+    self                         = optional(bool, false)
+    tags                         = optional(map(string), {})
+    to_port                      = optional(string)
+  }))
+  default = {}
+}
+
+variable "security_group_tags" {
+  description = "A map of additional tags to add to the security group created"
+  type        = map(string)
+  default     = {}
+}
diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/modules/eks-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md
new file mode 100644
index 0000000000..61ef2b980f
--- /dev/null
+++ b/modules/fargate-profile/README.md
@@ -0,0 +1,98 @@
+# EKS Fargate Profile Module
+
+Configuration in this directory creates an EKS Fargate Profile
+
+## Usage
+
+```hcl
+module "fargate_profile" {
+  source = "terraform-aws-modules/eks/aws//modules/fargate-profile"
+
+  name         = "separate-fargate-profile"
+  cluster_name = "my-cluster"
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+  selectors = [{
+    namespace = "kube-system"
+  }]
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
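+
+A minimal sketch of reusing an existing pod execution role instead of having the module create one. The role ARN, profile name, and selector labels below are placeholders for illustration, not values from this repository:
+
+```hcl
+module "fargate_profile" {
+  source = "terraform-aws-modules/eks/aws//modules/fargate-profile"
+
+  name         = "existing-role-fargate-profile"
+  cluster_name = "my-cluster"
+
+  # Reuse an existing pod execution role rather than creating one
+  create_iam_role = false
+  iam_role_arn    = "arn:aws:iam::123456789012:role/my-fargate-pod-execution-role"
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+  selectors = [{
+    namespace = "default"
+    labels = {
+      WorkerType = "fargate"
+    }
+  }]
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+When `create_iam_role = false`, the role referenced by `iam_role_arn` must already trust `eks-fargate-pods.amazonaws.com` and carry the `AmazonEKSFargatePodExecutionRolePolicy` permissions.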
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource |
+| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_account_id"></a> [account\_id](#input\_account\_id) | The AWS account ID - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_cluster_ip_family"></a> [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no |
+| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
+| <a name="input_create"></a> [create](#input\_create) | Determines whether to create Fargate profile or not | `bool` | `true` | no |
+| <a name="input_create_iam_role"></a> [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| <a name="input_create_iam_role_policy"></a> [create\_iam\_role\_policy](#input\_create\_iam\_role\_policy) | Determines whether an IAM role policy is created or not | `bool` | `true` | no |
+| <a name="input_iam_role_additional_policies"></a> [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
+| <a name="input_iam_role_arn"></a> [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| <a name="input_iam_role_attach_cni_policy"></a> [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| <a name="input_iam_role_description"></a> [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `"Fargate profile IAM role"` | no |
+| <a name="input_iam_role_name"></a> [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `""` | no |
+| <a name="input_iam_role_path"></a> [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| <a name="input_iam_role_permissions_boundary"></a> [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| <a name="input_iam_role_policy_statements"></a> [iam\_role\_policy\_statements](#input\_iam\_role\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_name"></a> [name](#input\_name) | Name of the EKS Fargate Profile | `string` | `""` | no |
+| <a name="input_partition"></a> [partition](#input\_partition) | The AWS partition - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_region"></a> [region](#input\_region) | Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration | `string` | `null` | no |
+| <a name="input_selectors"></a> [selectors](#input\_selectors) | Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile | <pre>list(object({<br/>    labels    = optional(map(string))<br/>    namespace = string<br/>  }))</pre> | `null` | no |
+| <a name="input_subnet_ids"></a> [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs for the EKS Fargate Profile | `list(string)` | `[]` | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| <a name="input_timeouts"></a> [timeouts](#input\_timeouts) | Create and delete timeout configurations for the Fargate Profile | <pre>object({<br/>    create = optional(string)<br/>    delete = optional(string)<br/>  })</pre> | `null` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_fargate_profile_arn"></a> [fargate\_profile\_arn](#output\_fargate\_profile\_arn) | Amazon Resource Name (ARN) of the EKS Fargate Profile |
+| <a name="output_fargate_profile_id"></a> [fargate\_profile\_id](#output\_fargate\_profile\_id) | EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`) |
+| <a name="output_fargate_profile_pod_execution_role_arn"></a> [fargate\_profile\_pod\_execution\_role\_arn](#output\_fargate\_profile\_pod\_execution\_role\_arn) | Amazon Resource Name (ARN) of the EKS Fargate Profile Pod execution role ARN |
+| <a name="output_fargate_profile_status"></a> [fargate\_profile\_status](#output\_fargate\_profile\_status) | Status of the EKS Fargate Profile |
+| <a name="output_iam_role_arn"></a> [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| <a name="output_iam_role_name"></a> [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| <a name="output_iam_role_unique_id"></a> [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+<!-- END_TF_DOCS -->
diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf
new file mode 100644
index 0000000000..78c94357f4
--- /dev/null
+++ b/modules/fargate-profile/main.tf
@@ -0,0 +1,190 @@
+data "aws_region" "current" {
+  count = var.create ? 1 : 0
+
+  region = var.region
+}
+data "aws_partition" "current" {
+  count = var.create && var.partition == "" ? 1 : 0
+}
+data "aws_caller_identity" "current" {
+  count = var.create && var.account_id == "" ? 1 : 0
+}
+
+locals {
+  account_id = try(data.aws_caller_identity.current[0].account_id, var.account_id)
+  partition  = try(data.aws_partition.current[0].partition, var.partition)
+  region     = try(data.aws_region.current[0].region, "")
+}
+
+locals {
+  create_iam_role = var.create && var.create_iam_role
+
+  iam_role_name          = coalesce(var.iam_role_name, var.name, "fargate-profile")
+  iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
+
+  ipv4_cni_policy = { for k, v in {
+    AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" }
+  ipv6_cni_policy = { for k, v in {
+    AmazonEKS_CNI_IPv6_Policy = "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" }
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+data "aws_iam_policy_document" "assume_role_policy" {
+  count = local.create_iam_role ? 1 : 0
+
+  statement {
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks-fargate-pods.amazonaws.com"]
+    }
+
+    condition {
+      test     = "ArnLike"
+      variable = "aws:SourceArn"
+
+      values = [
+        "arn:${local.partition}:eks:${local.region}:${local.account_id}:fargateprofile/${var.cluster_name}/*",
+      ]
+    }
+  }
+}
+
+resource "aws_iam_role" "this" {
+  count = local.create_iam_role ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.assume_role_policy[0].json
+  permissions_boundary  = var.iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+resource "aws_iam_role_policy_attachment" "this" {
+  for_each = { for k, v in merge(
+    {
+      AmazonEKSFargatePodExecutionRolePolicy = "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy"
+    },
+    local.ipv4_cni_policy,
+    local.ipv6_cni_policy
+  ) : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "additional" {
+  for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+locals {
+  create_iam_role_policy = local.create_iam_role && var.create_iam_role_policy && var.iam_role_policy_statements != null
+}
+
+data "aws_iam_policy_document" "role" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  dynamic "statement" {
+    for_each = var.iam_role_policy_statements != null ? var.iam_role_policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
+
+resource "aws_iam_role_policy" "this" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  policy      = data.aws_iam_policy_document.role[0].json
+  role        = aws_iam_role.this[0].id
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+resource "aws_eks_fargate_profile" "this" {
+  count = var.create ? 1 : 0
+
+  region = var.region
+
+  cluster_name           = var.cluster_name
+  fargate_profile_name   = var.name
+  pod_execution_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn
+  subnet_ids             = var.subnet_ids
+
+  dynamic "selector" {
+    for_each = var.selectors != null ? var.selectors : []
+
+    content {
+      namespace = selector.value.namespace
+      labels    = selector.value.labels
+    }
+  }
+
+  dynamic "timeouts" {
+    for_each = var.timeouts != null ? [var.timeouts] : []
+
+    content {
+      create = var.timeouts.create
+      delete = var.timeouts.delete
+    }
+  }
+
+  tags = var.tags
+}
diff --git a/modules/fargate-profile/migrations.tf b/modules/fargate-profile/migrations.tf
new file mode 100644
index 0000000000..02494f6893
--- /dev/null
+++ b/modules/fargate-profile/migrations.tf
@@ -0,0 +1,15 @@
+################################################################################
+# Migrations: v20.8 -> v20.9
+################################################################################
+
+# Node IAM role policy attachment
+# Commercial partition only - `moved` does not allow multiple moves to the same target
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKSFargatePodExecutionRolePolicy"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]
+}
diff --git a/modules/fargate-profile/outputs.tf b/modules/fargate-profile/outputs.tf
new file mode 100644
index 0000000000..96763bfb1f
--- /dev/null
+++ b/modules/fargate-profile/outputs.tf
@@ -0,0 +1,42 @@
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+  description = "The name of the IAM role"
+  value       = try(aws_iam_role.this[0].name, null)
+}
+
+output "iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the IAM role"
+  value       = try(aws_iam_role.this[0].arn, var.iam_role_arn)
+}
+
+output "iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = try(aws_iam_role.this[0].unique_id, null)
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profile_arn" {
+  description = "Amazon Resource Name (ARN) of the EKS Fargate Profile"
+  value       = try(aws_eks_fargate_profile.this[0].arn, null)
+}
+
+output "fargate_profile_id" {
+  description = "EKS Cluster name and EKS Fargate Profile name separated by a colon (`:`)"
+  value       = try(aws_eks_fargate_profile.this[0].id, null)
+}
+
+output "fargate_profile_status" {
+  description = "Status of the EKS Fargate Profile"
+  value       = try(aws_eks_fargate_profile.this[0].status, null)
+}
+
+output "fargate_profile_pod_execution_role_arn" {
+  description = "Amazon Resource Name (ARN) of the EKS Fargate Profile Pod execution role ARN"
+  value       = try(aws_eks_fargate_profile.this[0].pod_execution_role_arn, null)
+}
diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf
new file mode 100644
index 0000000000..5d87e56447
--- /dev/null
+++ b/modules/fargate-profile/variables.tf
@@ -0,0 +1,186 @@
+variable "create" {
+  description = "Determines whether to create Fargate profile or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+variable "region" {
+  description = "Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration"
+  type        = string
+  default     = null
+}
+
+variable "partition" {
+  description = "The AWS partition - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+variable "account_id" {
+  description = "The AWS account ID - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_role" {
+  description = "Determines whether an IAM role is created or to use an existing IAM role"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "cluster_ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`"
+  type        = string
+  default     = "ipv4"
+}
+
+variable "iam_role_arn" {
+  description = "Existing IAM role ARN for the Fargate profile. Required if `create_iam_role` is set to `false`"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_name" {
+  description = "Name to use on IAM role created"
+  type        = string
+  default     = ""
+}
+
+variable "iam_role_use_name_prefix" {
+  description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_path" {
+  description = "IAM role path"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_description" {
+  description = "Description of the role"
+  type        = string
+  default     = "Fargate profile IAM role"
+  nullable    = false
+}
+
+variable "iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_attach_cni_policy" {
+  description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_additional_policies" {
+  description = "Additional policies to be added to the IAM role"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+variable "iam_role_tags" {
+  description = "A map of additional tags to add to the IAM role created"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+variable "create_iam_role_policy" {
+  description = "Determines whether an IAM role policy is created or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+variable "cluster_name" {
+  description = "Name of the EKS cluster"
+  type        = string
+  default     = ""
+}
+
+variable "name" {
+  description = "Name of the EKS Fargate Profile"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "subnet_ids" {
+  description = "A list of subnet IDs for the EKS Fargate Profile"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "selectors" {
+  description = "Configuration block(s) for selecting Kubernetes Pods to execute with this Fargate Profile"
+  type = list(object({
+    labels    = optional(map(string))
+    namespace = string
+  }))
+  default = null
+}
+
+variable "timeouts" {
+  description = "Create and delete timeout configurations for the Fargate Profile"
+  type = object({
+    create = optional(string)
+    delete = optional(string)
+  })
+  default = null
+}
diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/modules/fargate-profile/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/modules/fargate/README.md b/modules/fargate/README.md
deleted file mode 100644
index 858ec968b7..0000000000
--- a/modules/fargate/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# eks `fargate` submodule
-
-Helper submodule to create and manage resources related to `aws_eks_fargate_profile`.
-
-## Assumptions
-* Designed for use by the parent module and not directly by end users
-
-## `fargate_profile` keys
-`fargate_profile` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_fargate_profile` name. Inner map can take the below values.
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| name | Fargate profile name | `string` | Auto generated in the following format `[cluster_name]-fargate-[fargate_profile_map_key]`| no |
-| selectors | A list of Kubernetes selectors. See examples/fargate/main.tf for example format. | <pre>list(map({<br>namespace = string<br>labels = map(string)<br>}))</pre>| `[]` | no |
-| subnets | List of subnet IDs. Will replace the root module subnets. | `list(string)` | `var.subnets` | no |
-| tags | Key-value map of resource tags. Will be merged with root module tags. | `map(string)` | `var.tags` | no |
-
-<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
-## Requirements
-
-| Name | Version |
-|------|---------|
-| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
-| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.40.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.40.0 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_fargate_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_fargate_profile) | resource |
-| [aws_iam_role.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_policy_document.eks_fargate_pod_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_role.custom_fargate_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | n/a | yes |
-| <a name="input_create_eks"></a> [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| <a name="input_create_fargate_pod_execution_role"></a> [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created. | `bool` | `true` | no |
-| <a name="input_eks_depends_on"></a> [eks\_depends\_on](#input\_eks\_depends\_on) | List of references to other resources this submodule depends on. | `any` | `null` | no |
-| <a name="input_fargate_pod_execution_role_name"></a> [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no |
-| <a name="input_fargate_profiles"></a> [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no |
-| <a name="input_iam_path"></a> [iam\_path](#input\_iam\_path) | IAM roles will be created on this path. | `string` | `"/"` | no |
-| <a name="input_iam_policy_arn_prefix"></a> [iam\_policy\_arn\_prefix](#input\_iam\_policy\_arn\_prefix) | IAM policy prefix with the correct AWS partition. | `string` | n/a | yes |
-| <a name="input_permissions_boundary"></a> [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no |
-| <a name="input_subnets"></a> [subnets](#input\_subnets) | A list of subnets for the EKS Fargate profiles. | `list(string)` | `[]` | no |
-| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources. | `map(string)` | `{}` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| <a name="output_aws_auth_roles"></a> [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| <a name="output_fargate_profile_arns"></a> [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Amazon Resource Name (ARN) of the EKS Fargate Profiles. |
-| <a name="output_fargate_profile_ids"></a> [fargate\_profile\_ids](#output\_fargate\_profile\_ids) | EKS Cluster name and EKS Fargate Profile names separated by a colon (:). |
-| <a name="output_iam_role_arn"></a> [iam\_role\_arn](#output\_iam\_role\_arn) | IAM role ARN for EKS Fargate pods |
-| <a name="output_iam_role_name"></a> [iam\_role\_name](#output\_iam\_role\_name) | IAM role name for EKS Fargate pods |
-<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
diff --git a/modules/fargate/data.tf b/modules/fargate/data.tf
deleted file mode 100644
index ee8e0d8e96..0000000000
--- a/modules/fargate/data.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-data "aws_iam_policy_document" "eks_fargate_pod_assume_role" {
-  count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-  statement {
-    effect  = "Allow"
-    actions = ["sts:AssumeRole"]
-
-    principals {
-      type        = "Service"
-      identifiers = ["eks-fargate-pods.amazonaws.com"]
-    }
-  }
-}
-
-data "aws_iam_role" "custom_fargate_iam_role" {
-  count = local.create_eks && !var.create_fargate_pod_execution_role ? 1 : 0
-  name  = var.fargate_pod_execution_role_name
-}
diff --git a/modules/fargate/fargate.tf b/modules/fargate/fargate.tf
deleted file mode 100644
index caa73faf88..0000000000
--- a/modules/fargate/fargate.tf
+++ /dev/null
@@ -1,33 +0,0 @@
-resource "aws_iam_role" "eks_fargate_pod" {
-  count                = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-  name_prefix          = format("%s-fargate", substr(var.cluster_name, 0, 24))
-  assume_role_policy   = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json
-  permissions_boundary = var.permissions_boundary
-  tags                 = var.tags
-  path                 = var.iam_path
-}
-
-resource "aws_iam_role_policy_attachment" "eks_fargate_pod" {
-  count      = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0
-  policy_arn = "${var.iam_policy_arn_prefix}/AmazonEKSFargatePodExecutionRolePolicy"
-  role       = aws_iam_role.eks_fargate_pod[0].name
-}
-
-resource "aws_eks_fargate_profile" "this" {
-  for_each               = local.create_eks ? local.fargate_profiles_expanded : {}
-  cluster_name           = var.cluster_name
-  fargate_profile_name   = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-")))
-  pod_execution_role_arn = local.pod_execution_role_arn
-  subnet_ids             = lookup(each.value, "subnets", var.subnets)
-  tags                   = each.value.tags
-
-  dynamic "selector" {
-    for_each = each.value.selectors
-    content {
-      namespace = selector.value["namespace"]
-      labels    = lookup(selector.value, "labels", {})
-    }
-  }
-
-  depends_on = [var.eks_depends_on]
-}
diff --git a/modules/fargate/locals.tf b/modules/fargate/locals.tf
deleted file mode 100644
index 18ba96403b..0000000000
--- a/modules/fargate/locals.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-locals {
-  create_eks              = var.create_eks && length(var.fargate_profiles) > 0
-  pod_execution_role_arn  = var.create_fargate_pod_execution_role ? element(concat(aws_iam_role.eks_fargate_pod.*.arn, [""]), 0) : element(concat(data.aws_iam_role.custom_fargate_iam_role.*.arn, [""]), 0)
-  pod_execution_role_name = var.create_fargate_pod_execution_role ? element(concat(aws_iam_role.eks_fargate_pod.*.name, [""]), 0) : element(concat(data.aws_iam_role.custom_fargate_iam_role.*.name, [""]), 0)
-
-  fargate_profiles_expanded = { for k, v in var.fargate_profiles : k => merge(
-    v,
-    { tags = merge(var.tags, lookup(v, "tags", {})) },
-  ) if var.create_eks }
-}
diff --git a/modules/fargate/outputs.tf b/modules/fargate/outputs.tf
deleted file mode 100644
index 126ba6e385..0000000000
--- a/modules/fargate/outputs.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-output "fargate_profile_ids" {
-  description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
-  value       = [for f in aws_eks_fargate_profile.this : f.id]
-}
-
-output "fargate_profile_arns" {
-  description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles."
-  value       = [for f in aws_eks_fargate_profile.this : f.arn]
-}
-
-output "iam_role_name" {
-  description = "IAM role name for EKS Fargate pods"
-  value       = local.pod_execution_role_name
-}
-
-output "iam_role_arn" {
-  description = "IAM role ARN for EKS Fargate pods"
-  value       = local.pod_execution_role_arn
-}
-
-output "aws_auth_roles" {
-  description = "Roles for use in aws-auth ConfigMap"
-  value = [
-    for i in range(1) : {
-      worker_role_arn = local.pod_execution_role_arn
-      platform        = "fargate"
-    } if local.create_eks
-  ]
-}
diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf
deleted file mode 100644
index acfd69bd96..0000000000
--- a/modules/fargate/variables.tf
+++ /dev/null
@@ -1,65 +0,0 @@
-variable "cluster_name" {
-  description = "Name of the EKS cluster."
-  type        = string
-}
-
-variable "create_eks" {
-  description = "Controls if EKS resources should be created (it affects almost all resources)"
-  type        = bool
-  default     = true
-}
-
-variable "iam_path" {
-  description = "IAM roles will be created on this path."
-  type        = string
-  default     = "/"
-}
-
-variable "iam_policy_arn_prefix" {
-  description = "IAM policy prefix with the correct AWS partition."
-  type        = string
-}
-
-variable "create_fargate_pod_execution_role" {
-  description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created."
-  type        = bool
-  default     = true
-}
-
-variable "fargate_pod_execution_role_name" {
-  description = "The IAM Role that provides permissions for the EKS Fargate Profile."
-  type        = string
-  default     = null
-}
-
-variable "fargate_profiles" {
-  description = "Fargate profiles to create. See `fargate_profile` keys section in README.md for more details"
-  type        = any
-  default     = {}
-}
-
-variable "permissions_boundary" {
-  description = "If provided, all IAM roles will be created with this permissions boundary attached."
-  type        = string
-  default     = null
-}
-
-variable "subnets" {
-  description = "A list of subnets for the EKS Fargate profiles."
-  type        = list(string)
-  default     = []
-}
-
-variable "tags" {
-  description = "A map of tags to add to all resources."
-  type        = map(string)
-  default     = {}
-}
-
-# Hack for a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
-# Will be removed in Terraform 0.13 with the support of module's `depends_on` https://github.com/hashicorp/terraform/issues/10462
-variable "eks_depends_on" {
-  description = "List of references to other resources this submodule depends on."
-  type        = any
-  default     = null
-}
diff --git a/modules/fargate/versions.tf b/modules/fargate/versions.tf
deleted file mode 100644
index 2051547e51..0000000000
--- a/modules/fargate/versions.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws = ">= 3.40.0"
-  }
-}
diff --git a/modules/hybrid-node-role/README.md b/modules/hybrid-node-role/README.md
new file mode 100644
index 0000000000..176aabcdf4
--- /dev/null
+++ b/modules/hybrid-node-role/README.md
@@ -0,0 +1,159 @@
+# EKS Hybrid Node Role Module
+
+Terraform module which creates IAM role and policy resources for Amazon EKS Hybrid Node(s).
+
+## Usage
+
+EKS Hybrid nodes use the AWS IAM Authenticator and temporary IAM credentials provisioned by AWS SSM or AWS IAM Roles Anywhere to authenticate with the EKS cluster. This module supports both SSM and IAM Roles Anywhere based IAM permissions.
+
+### SSM
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  ...
+  access_entries = {
+    hybrid-node-role = {
+      principal_arn = module.eks_hybrid_node_role.arn
+      type          = "HYBRID_LINUX"
+    }
+  }
+}
+
+module "eks_hybrid_node_role" {
+  source = "terraform-aws-modules/eks/aws//modules/hybrid-node-role"
+
+  name = "hybrid"
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+### IAM Roles Anywhere
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  ...
+  access_entries = {
+    hybrid-node-role = {
+      principal_arn = module.eks_hybrid_node_role.arn
+      type          = "HYBRID_LINUX"
+    }
+  }
+}
+
+module "eks_hybrid_node_role" {
+  source = "terraform-aws-modules/eks/aws//modules/hybrid-node-role"
+
+  name = "hybrid-ira"
+
+  enable_ira = true
+
+  ira_trust_anchor_source_type           = "CERTIFICATE_BUNDLE"
+  ira_trust_anchor_x509_certificate_data = <<-EOT
+    MIIFMzCCAxugAwIBAgIRAMnVXU7ncv/+Cl16eJbZ9hswDQYJKoZIhvcNAQELBQAw
+    ...
+    MGx/BMRkrNUVcg3xA0lhECo/olodCkmZo5/mjybbjFQwJzDSKFoW
+  EOT
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_policy.intermediate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.intermediate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.intermediate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_rolesanywhere_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rolesanywhere_profile) | resource |
+| [aws_rolesanywhere_trust_anchor.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rolesanywhere_trust_anchor) | resource |
+| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.intermediate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.intermediate_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_cluster_arns"></a> [cluster\_arns](#input\_cluster\_arns) | List of EKS cluster ARNs to allow the node to describe | `list(string)` | <pre>[<br/>  "*"<br/>]</pre> | no |
+| <a name="input_create"></a> [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no |
+| <a name="input_description"></a> [description](#input\_description) | IAM role description | `string` | `"EKS Hybrid Node IAM role"` | no |
+| <a name="input_enable_ira"></a> [enable\_ira](#input\_enable\_ira) | Enables IAM Roles Anywhere based IAM permissions on the node | `bool` | `false` | no |
+| <a name="input_enable_pod_identity"></a> [enable\_pod\_identity](#input\_enable\_pod\_identity) | Enables EKS Pod Identity based IAM permissions on the node | `bool` | `true` | no |
+| <a name="input_intermediate_policy_name"></a> [intermediate\_policy\_name](#input\_intermediate\_policy\_name) | Name of the IAM policy | `string` | `null` | no |
+| <a name="input_intermediate_policy_statements"></a> [intermediate\_policy\_statements](#input\_intermediate\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_intermediate_policy_use_name_prefix"></a> [intermediate\_policy\_use\_name\_prefix](#input\_intermediate\_policy\_use\_name\_prefix) | Determines whether the name of the IAM policy (`intermediate_policy_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_intermediate_role_description"></a> [intermediate\_role\_description](#input\_intermediate\_role\_description) | IAM role description | `string` | `"EKS Hybrid Node IAM Roles Anywhere intermediate IAM role"` | no |
+| <a name="input_intermediate_role_name"></a> [intermediate\_role\_name](#input\_intermediate\_role\_name) | Name of the IAM role | `string` | `null` | no |
+| <a name="input_intermediate_role_path"></a> [intermediate\_role\_path](#input\_intermediate\_role\_path) | Path of the IAM role | `string` | `"/"` | no |
+| <a name="input_intermediate_role_policies"></a> [intermediate\_role\_policies](#input\_intermediate\_role\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no |
+| <a name="input_intermediate_role_use_name_prefix"></a> [intermediate\_role\_use\_name\_prefix](#input\_intermediate\_role\_use\_name\_prefix) | Determines whether the name of the IAM role (`intermediate_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_ira_profile_duration_seconds"></a> [ira\_profile\_duration\_seconds](#input\_ira\_profile\_duration\_seconds) | The number of seconds the vended session credentials are valid for. Defaults to `3600` | `number` | `null` | no |
+| <a name="input_ira_profile_managed_policy_arns"></a> [ira\_profile\_managed\_policy\_arns](#input\_ira\_profile\_managed\_policy\_arns) | A list of managed policy ARNs that apply to the vended session credentials | `list(string)` | `[]` | no |
+| <a name="input_ira_profile_name"></a> [ira\_profile\_name](#input\_ira\_profile\_name) | Name of the Roles Anywhere profile | `string` | `null` | no |
+| <a name="input_ira_profile_require_instance_properties"></a> [ira\_profile\_require\_instance\_properties](#input\_ira\_profile\_require\_instance\_properties) | Specifies whether instance properties are required in [CreateSession](https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) requests with this profile | `bool` | `null` | no |
+| <a name="input_ira_profile_session_policy"></a> [ira\_profile\_session\_policy](#input\_ira\_profile\_session\_policy) | A session policy that applies to the trust boundary of the vended session credentials | `string` | `null` | no |
+| <a name="input_ira_trust_anchor_acm_pca_arn"></a> [ira\_trust\_anchor\_acm\_pca\_arn](#input\_ira\_trust\_anchor\_acm\_pca\_arn) | The ARN of the ACM PCA that issued the trust anchor certificate | `string` | `null` | no |
+| <a name="input_ira_trust_anchor_name"></a> [ira\_trust\_anchor\_name](#input\_ira\_trust\_anchor\_name) | Name of the Roles Anywhere trust anchor | `string` | `null` | no |
+| <a name="input_ira_trust_anchor_notification_settings"></a> [ira\_trust\_anchor\_notification\_settings](#input\_ira\_trust\_anchor\_notification\_settings) | Notification settings for the trust anchor | <pre>list(object({<br/>    channel   = optional(string)<br/>    enabled   = optional(bool)<br/>    event     = optional(string)<br/>    threshold = optional(number)<br/>  }))</pre> | `null` | no |
+| <a name="input_ira_trust_anchor_source_type"></a> [ira\_trust\_anchor\_source\_type](#input\_ira\_trust\_anchor\_source\_type) | The source type of the trust anchor | `string` | `null` | no |
+| <a name="input_ira_trust_anchor_x509_certificate_data"></a> [ira\_trust\_anchor\_x509\_certificate\_data](#input\_ira\_trust\_anchor\_x509\_certificate\_data) | The X.509 certificate data of the trust anchor | `string` | `null` | no |
+| <a name="input_max_session_duration"></a> [max\_session\_duration](#input\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no |
+| <a name="input_name"></a> [name](#input\_name) | Name of the IAM role | `string` | `"EKSHybridNode"` | no |
+| <a name="input_path"></a> [path](#input\_path) | Path of the IAM role | `string` | `"/"` | no |
+| <a name="input_permissions_boundary_arn"></a> [permissions\_boundary\_arn](#input\_permissions\_boundary\_arn) | Permissions boundary ARN to use for the IAM role | `string` | `null` | no |
+| <a name="input_policies"></a> [policies](#input\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no |
+| <a name="input_policy_description"></a> [policy\_description](#input\_policy\_description) | IAM policy description | `string` | `"EKS Hybrid Node IAM role policy"` | no |
+| <a name="input_policy_name"></a> [policy\_name](#input\_policy\_name) | Name of the IAM policy | `string` | `"EKSHybridNode"` | no |
+| <a name="input_policy_path"></a> [policy\_path](#input\_policy\_path) | Path of the IAM policy | `string` | `"/"` | no |
+| <a name="input_policy_statements"></a> [policy\_statements](#input\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_policy_use_name_prefix"></a> [policy\_use\_name\_prefix](#input\_policy\_use\_name\_prefix) | Determines whether the name of the IAM policy (`policy_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of additional tags to add the the IAM role | `map(string)` | `{}` | no |
+| <a name="input_trust_anchor_arns"></a> [trust\_anchor\_arns](#input\_trust\_anchor\_arns) | List of IAM Roles Anywhere trust anchor ARNs. Required if `enable_ira` is set to `true` | `list(string)` | `[]` | no |
+| <a name="input_use_name_prefix"></a> [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether the name of the IAM role (`name`) is used as a prefix | `bool` | `true` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_arn"></a> [arn](#output\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_intermediate_role_arn"></a> [intermediate\_role\_arn](#output\_intermediate\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_intermediate_role_name"></a> [intermediate\_role\_name](#output\_intermediate\_role\_name) | The name of the node IAM role |
+| <a name="output_intermediate_role_unique_id"></a> [intermediate\_role\_unique\_id](#output\_intermediate\_role\_unique\_id) | Stable and unique string identifying the node IAM role |
+| <a name="output_name"></a> [name](#output\_name) | The name of the node IAM role |
+| <a name="output_unique_id"></a> [unique\_id](#output\_unique\_id) | Stable and unique string identifying the node IAM role |
+<!-- END_TF_DOCS -->
diff --git a/modules/hybrid-node-role/main.tf b/modules/hybrid-node-role/main.tf
new file mode 100644
index 0000000000..1d14f41b40
--- /dev/null
+++ b/modules/hybrid-node-role/main.tf
@@ -0,0 +1,369 @@
+data "aws_partition" "current" {
+  count = var.create ? 1 : 0
+}
+
+locals {
+  partition = try(data.aws_partition.current[0].partition, "")
+}
+
+################################################################################
+# Node IAM Role
+################################################################################
+
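+# The node role trusts either SSM (hybrid activations) or, when `enable_ira` is
+# set, the IAM Roles Anywhere intermediate role defined further below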
+data "aws_iam_policy_document" "assume_role" {
+  count = var.create ? 1 : 0
+
+  # SSM
+  dynamic "statement" {
+    for_each = var.enable_ira ? [] : [1]
+
+    content {
+      actions = [
+        "sts:AssumeRole",
+        "sts:TagSession",
+      ]
+
+      principals {
+        type        = "Service"
+        identifiers = ["ssm.amazonaws.com"]
+      }
+    }
+  }
+
+  # IAM Roles Anywhere
+  dynamic "statement" {
+    for_each = var.enable_ira ? [1] : []
+
+    content {
+      actions = [
+        "sts:TagSession",
+        "sts:SetSourceIdentity",
+      ]
+
+      principals {
+        type        = "AWS"
+        identifiers = [aws_iam_role.intermediate[0].arn]
+      }
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_ira ? [1] : []
+
+    content {
+      actions = [
+        "sts:AssumeRole",
+        "sts:TagSession",
+      ]
+
+      principals {
+        type        = "AWS"
+        identifiers = [aws_iam_role.intermediate[0].arn]
+      }
+
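+      # Roles Anywhere propagates certificate attributes as principal tags, so
+      # requiring the session name to equal the certificate CN scopes each node
+      # to a session under its own identity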
+      condition {
+        test     = "StringEquals"
+        variable = "sts:RoleSessionName"
+        values   = ["$${aws:PrincipalTag/x509Subject/CN}"]
+      }
+    }
+  }
+}
+
+resource "aws_iam_role" "this" {
+  count = var.create ? 1 : 0
+
+  name        = var.use_name_prefix ? null : var.name
+  name_prefix = var.use_name_prefix ? "${var.name}-" : null
+  path        = var.path
+  description = var.description
+
+  assume_role_policy    = data.aws_iam_policy_document.assume_role[0].json
+  max_session_duration  = var.max_session_duration
+  permissions_boundary  = var.permissions_boundary_arn
+  force_detach_policies = true
+
+  tags = var.tags
+}
+
+################################################################################
+# Node IAM Role Policy
+################################################################################
+
+data "aws_iam_policy_document" "this" {
+  count = var.create ? 1 : 0
+
+  statement {
+    actions = [
+      "ssm:DeregisterManagedInstance",
+      "ssm:DescribeInstanceInformation",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    actions   = ["eks:DescribeCluster"]
+    resources = var.cluster_arns
+  }
+
+  dynamic "statement" {
+    for_each = var.enable_pod_identity ? [1] : []
+
+    content {
+      actions   = ["eks-auth:AssumeRoleForPodIdentity"]
+      resources = ["*"]
+    }
+  }
+
+  dynamic "statement" {
+    for_each = var.policy_statements != null ? var.policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
+
+resource "aws_iam_policy" "this" {
+  count = var.create ? 1 : 0
+
+  name        = var.policy_use_name_prefix ? null : var.policy_name
+  name_prefix = var.policy_use_name_prefix ? "${var.policy_name}-" : null
+  path        = var.policy_path
+  description = var.policy_description
+  policy      = data.aws_iam_policy_document.this[0].json
+
+  tags = var.tags
+}
+
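+# Attaches the node policy above plus the AWS managed SSM core and ECR pull-only
+# policies, along with any additional policies supplied via `var.policies`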
+resource "aws_iam_role_policy_attachment" "this" {
+  for_each = { for k, v in merge(
+    {
+      node                               = try(aws_iam_policy.this[0].arn, null)
+      AmazonSSMManagedInstanceCore       = "arn:${local.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
+      AmazonEC2ContainerRegistryPullOnly = "arn:${local.partition}:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly"
+    },
+    var.policies
+  ) : k => v if var.create }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+################################################################################
+# Roles Anywhere Profile
+################################################################################
+
+locals {
+  enable_ira = var.create && var.enable_ira
+}
+
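+# Vends temporary credentials for the intermediate role below to nodes that
+# authenticate through the Roles Anywhere trust anchor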
+resource "aws_rolesanywhere_profile" "this" {
+  count = local.enable_ira ? 1 : 0
+
+  duration_seconds            = var.ira_profile_duration_seconds
+  managed_policy_arns         = var.ira_profile_managed_policy_arns
+  name                        = try(coalesce(var.ira_profile_name, var.name), null)
+  require_instance_properties = var.ira_profile_require_instance_properties
+  role_arns                   = [aws_iam_role.intermediate[0].arn]
+  session_policy              = var.ira_profile_session_policy
+
+  tags = var.tags
+}
+
+################################################################################
+# Roles Anywhere Trust Anchor
+################################################################################
+
+resource "aws_rolesanywhere_trust_anchor" "this" {
+  count = local.enable_ira ? 1 : 0
+
+  name = try(coalesce(var.ira_trust_anchor_name, var.name), null)
+
+  dynamic "notification_settings" {
+    for_each = var.ira_trust_anchor_notification_settings != null ? var.ira_trust_anchor_notification_settings : []
+
+    content {
+      channel   = try(notification_settings.value.channel, null)
+      enabled   = try(notification_settings.value.enabled, null)
+      event     = try(notification_settings.value.event, null)
+      threshold = try(notification_settings.value.threshold, null)
+    }
+  }
+
+  source {
+    source_data {
+      acm_pca_arn           = var.ira_trust_anchor_acm_pca_arn
+      x509_certificate_data = var.ira_trust_anchor_x509_certificate_data
+    }
+    source_type = var.ira_trust_anchor_source_type
+  }
+
+  tags = var.tags
+}
+
+################################################################################
+# Intermediate IAM Role
+################################################################################
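+# Assumed by rolesanywhere.amazonaws.com (scoped to the trust anchor ARNs); the
+# node role's trust policy above allows this role to assume the node role in turn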
+
+data "aws_iam_policy_document" "intermediate_assume_role" {
+  count = local.enable_ira ? 1 : 0
+
+  statement {
+    actions = [
+      "sts:AssumeRole",
+      "sts:TagSession",
+      "sts:SetSourceIdentity",
+    ]
+
+    principals {
+      type        = "Service"
+      identifiers = ["rolesanywhere.amazonaws.com"]
+    }
+
+    condition {
+      test     = "ArnEquals"
+      variable = "aws:SourceArn"
+      values   = concat(var.trust_anchor_arns, aws_rolesanywhere_trust_anchor.this[*].arn)
+    }
+  }
+}
+
+locals {
+  intermediate_role_use_name_prefix = coalesce(var.intermediate_role_use_name_prefix, var.use_name_prefix)
+  intermediate_role_name            = coalesce(var.intermediate_role_name, "${var.name}-inter")
+}
+
+resource "aws_iam_role" "intermediate" {
+  count = local.enable_ira ? 1 : 0
+
+  name        = local.intermediate_role_use_name_prefix ? null : local.intermediate_role_name
+  name_prefix = local.intermediate_role_use_name_prefix ? "${local.intermediate_role_name}-" : null
+  path        = coalesce(var.intermediate_role_path, var.path)
+  description = var.intermediate_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.intermediate_assume_role[0].json
+  max_session_duration  = var.max_session_duration
+  permissions_boundary  = var.permissions_boundary_arn
+  force_detach_policies = true
+
+  tags = var.tags
+}
+
+################################################################################
+# Intermediate IAM Role Policy
+################################################################################
+
+data "aws_iam_policy_document" "intermediate" {
+  count = local.enable_ira ? 1 : 0
+
+  statement {
+    actions   = ["eks:DescribeCluster"]
+    resources = var.cluster_arns
+  }
+
+  dynamic "statement" {
+    for_each = var.intermediate_policy_statements != null ? var.intermediate_policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
+
+locals {
+  intermediate_policy_use_name_prefix = coalesce(var.intermediate_policy_use_name_prefix, var.policy_use_name_prefix)
+  intermediate_policy_name            = coalesce(var.intermediate_policy_name, var.policy_name)
+}
+
+resource "aws_iam_policy" "intermediate" {
+  count = local.enable_ira ? 1 : 0
+
+  name        = local.intermediate_policy_use_name_prefix ? null : local.intermediate_policy_name
+  name_prefix = local.intermediate_policy_use_name_prefix ? "${local.intermediate_policy_name}-" : null
+  path        = var.policy_path
+  description = var.policy_description
+  policy      = data.aws_iam_policy_document.intermediate[0].json
+
+  tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "intermediate" {
+  for_each = { for k, v in merge(
+    {
+      intermediate                       = try(aws_iam_policy.intermediate[0].arn, null)
+      AmazonEC2ContainerRegistryPullOnly = "arn:${local.partition}:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly"
+    },
+    var.intermediate_role_policies
+  ) : k => v if local.enable_ira }
+
+  policy_arn = each.value
+  role       = aws_iam_role.intermediate[0].name
+}
diff --git a/modules/hybrid-node-role/outputs.tf b/modules/hybrid-node-role/outputs.tf
new file mode 100644
index 0000000000..dc4e26e519
--- /dev/null
+++ b/modules/hybrid-node-role/outputs.tf
@@ -0,0 +1,37 @@
+################################################################################
+# Node IAM Role
+################################################################################
+
+output "name" {
+  description = "The name of the node IAM role"
+  value       = try(aws_iam_role.this[0].name, null)
+}
+
+output "arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = try(aws_iam_role.this[0].arn, null)
+}
+
+output "unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = try(aws_iam_role.this[0].unique_id, null)
+}
+
+################################################################################
+# Intermediate IAM Role
+################################################################################
+
+output "intermediate_role_name" {
+  description = "The name of the node IAM role"
+  value       = try(aws_iam_role.intermediate[0].name, null)
+}
+
+output "intermediate_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = try(aws_iam_role.intermediate[0].arn, null)
+}
+
+output "intermediate_role_unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = try(aws_iam_role.intermediate[0].unique_id, null)
+}
diff --git a/modules/hybrid-node-role/variables.tf b/modules/hybrid-node-role/variables.tf
new file mode 100644
index 0000000000..34e1247667
--- /dev/null
+++ b/modules/hybrid-node-role/variables.tf
@@ -0,0 +1,284 @@
+variable "create" {
+  description = "Controls if resources should be created (affects nearly all resources)"
+  type        = bool
+  default     = true
+}
+
+################################################################################
+# Node IAM Role
+################################################################################
+
+variable "name" {
+  description = "Name of the IAM role"
+  type        = string
+  default     = "EKSHybridNode"
+}
+
+variable "use_name_prefix" {
+  description = "Determines whether the name of the IAM role (`name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "path" {
+  description = "Path of the IAM role"
+  type        = string
+  default     = "/"
+}
+
+variable "description" {
+  description = "IAM role description"
+  type        = string
+  default     = "EKS Hybrid Node IAM role"
+}
+
+variable "max_session_duration" {
+  description = "Maximum API session duration in seconds between 3600 and 43200"
+  type        = number
+  default     = null
+}
+
+variable "permissions_boundary_arn" {
+  description = "Permissions boundary ARN to use for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "tags" {
+  description = "A map of additional tags to add the the IAM role"
+  type        = map(string)
+  default     = {}
+}
+
+variable "enable_ira" {
+  description = "Enables IAM Roles Anywhere based IAM permissions on the node"
+  type        = bool
+  default     = false
+}
+
+variable "trust_anchor_arns" {
+  description = "List of IAM Roles Anywhere trust anchor ARNs. Required if `enable_ira` is set to `true`"
+  type        = list(string)
+  default     = []
+}
+
+################################################################################
+# Node IAM Role Policy
+################################################################################
+
+variable "policy_name" {
+  description = "Name of the IAM policy"
+  type        = string
+  default     = "EKSHybridNode"
+}
+
+variable "policy_use_name_prefix" {
+  description = "Determines whether the name of the IAM policy (`policy_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "policy_path" {
+  description = "Path of the IAM policy"
+  type        = string
+  default     = "/"
+}
+
+variable "policy_description" {
+  description = "IAM policy description"
+  type        = string
+  default     = "EKS Hybrid Node IAM role policy"
+}
+
+variable "policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
+
+variable "policies" {
+  description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format"
+  type        = map(string)
+  default     = {}
+}
+
+variable "cluster_arns" {
+  description = "List of EKS cluster ARNs to allow the node to describe"
+  type        = list(string)
+  default     = ["*"]
+}
+
+variable "enable_pod_identity" {
+  description = "Enables EKS Pod Identity based IAM permissions on the node"
+  type        = bool
+  default     = true
+}
+
+################################################################################
+# IAM Roles Anywhere Profile
+################################################################################
+
+variable "ira_profile_name" {
+  description = "Name of the Roles Anywhere profile"
+  type        = string
+  default     = null
+}
+
+variable "ira_profile_duration_seconds" {
+  description = "The number of seconds the vended session credentials are valid for. Defaults to `3600`"
+  type        = number
+  default     = null
+}
+
+variable "ira_profile_managed_policy_arns" {
+  description = "A list of managed policy ARNs that apply to the vended session credentials"
+  type        = list(string)
+  default     = []
+}
+
+variable "ira_profile_require_instance_properties" {
+  description = "Specifies whether instance properties are required in [CreateSession](https://docs.aws.amazon.com/rolesanywhere/latest/APIReference/API_CreateSession.html) requests with this profile"
+  type        = bool
+  default     = null
+}
+
+variable "ira_profile_session_policy" {
+  description = "A session policy that applies to the trust boundary of the vended session credentials"
+  type        = string
+  default     = null
+}
+
+################################################################################
+# Roles Anywhere Trust Anchor
+################################################################################
+
+variable "ira_trust_anchor_name" {
+  description = "Name of the Roles Anywhere trust anchor"
+  type        = string
+  default     = null
+}
+
+variable "ira_trust_anchor_notification_settings" {
+  description = "Notification settings for the trust anchor"
+  type = list(object({
+    channel   = optional(string)
+    enabled   = optional(bool)
+    event     = optional(string)
+    threshold = optional(number)
+  }))
+  default = null
+}
+
+variable "ira_trust_anchor_acm_pca_arn" {
+  description = "The ARN of the ACM PCA that issued the trust anchor certificate"
+  type        = string
+  default     = null
+}
+
+variable "ira_trust_anchor_x509_certificate_data" {
+  description = "The X.509 certificate data of the trust anchor"
+  type        = string
+  default     = null
+}
+
+variable "ira_trust_anchor_source_type" {
+  description = "The source type of the trust anchor"
+  type        = string
+  default     = null
+}
+
+################################################################################
+# Intermediate IAM Role
+################################################################################
+
+variable "intermediate_role_name" {
+  description = "Name of the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "intermediate_role_use_name_prefix" {
+  description = "Determines whether the name of the IAM role (`intermediate_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "intermediate_role_path" {
+  description = "Path of the IAM role"
+  type        = string
+  default     = "/"
+}
+
+variable "intermediate_role_description" {
+  description = "IAM role description"
+  type        = string
+  default     = "EKS Hybrid Node IAM Roles Anywhere intermediate IAM role"
+}
+
+################################################################################
+# Intermediate IAM Role Policy
+################################################################################
+
+variable "intermediate_policy_name" {
+  description = "Name of the IAM policy"
+  type        = string
+  default     = null
+}
+
+variable "intermediate_policy_use_name_prefix" {
+  description = "Determines whether the name of the IAM policy (`intermediate_policy_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "intermediate_policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
+
+variable "intermediate_role_policies" {
+  description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format"
+  type        = map(string)
+  default     = {}
+}
diff --git a/modules/hybrid-node-role/versions.tf b/modules/hybrid-node-role/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/modules/hybrid-node-role/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md
new file mode 100644
index 0000000000..85e472b225
--- /dev/null
+++ b/modules/karpenter/README.md
@@ -0,0 +1,198 @@
+# Karpenter Module
+
+Configuration in this directory creates the AWS resources required by Karpenter
+
+## Usage
+
+### All Resources (Default)
+
+In the following example, the Karpenter module will create:
+- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller
+- A Pod Identity association that grants the Karpenter controller the access provided by the IAM role
+- A Node IAM role that Karpenter will use to create an instance profile so that nodes receive their IAM permissions
+- An access entry for the Node IAM role to allow nodes to join the cluster
+- An SQS queue and EventBridge event rules that Karpenter uses for spot termination handling, capacity re-balancing, etc.
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  ...
+}
+
+module "karpenter" {
+  source = "terraform-aws-modules/eks/aws//modules/karpenter"
+
+  cluster_name = module.eks.cluster_name
+
+  # Attach additional IAM policies to the Karpenter node IAM role
+  node_iam_role_additional_policies = {
+    AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+  }
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+### Re-Use Existing Node IAM Role
+
+In the following example, the Karpenter module will create:
+- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller
+- An SQS queue and EventBridge event rules that Karpenter uses for spot termination handling, capacity re-balancing, etc.
+
+In this scenario, Karpenter will re-use an existing Node IAM role from the EKS managed node group, which already has the necessary access entry permissions:
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks"
+
+  # Shown just for connection between cluster and Karpenter sub-module below
+  eks_managed_node_groups = {
+    initial = {
+      instance_types = ["t3.medium"]
+
+      min_size     = 1
+      max_size     = 3
+      desired_size = 1
+    }
+  }
+  ...
+}
+
+module "karpenter" {
+  source = "terraform-aws-modules/eks/aws//modules/karpenter"
+
+  cluster_name = module.eks.cluster_name
+
+  create_node_iam_role = false
+  node_iam_role_arn    = module.eks.eks_managed_node_groups["initial"].iam_role_arn
+
+  # Since the node group role will already have an access entry
+  create_access_entry = false
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_cloudwatch_event_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
+| [aws_cloudwatch_event_target.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
+| [aws_eks_access_entry.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource |
+| [aws_eks_pod_identity_association.karpenter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_pod_identity_association) | resource |
+| [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_policy.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.controller_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_sqs_queue.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource |
+| [aws_sqs_queue_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_iam_policy_document.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.controller_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.node_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_access_entry_type"></a> [access\_entry\_type](#input\_access\_entry\_type) | Type of the access entry. `EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX` | `string` | `"EC2_LINUX"` | no |
+| <a name="input_ami_id_ssm_parameter_arns"></a> [ami\_id\_ssm\_parameter\_arns](#input\_ami\_id\_ssm\_parameter\_arns) | List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs) | `list(string)` | `[]` | no |
+| <a name="input_cluster_ip_family"></a> [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true` | `string` | `"ipv4"` | no |
+| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | The name of the EKS cluster | `string` | `""` | no |
+| <a name="input_create"></a> [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no |
+| <a name="input_create_access_entry"></a> [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the node IAM role | `bool` | `true` | no |
+| <a name="input_create_iam_role"></a> [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created | `bool` | `true` | no |
+| <a name="input_create_instance_profile"></a> [create\_instance\_profile](#input\_create\_instance\_profile) | Whether to create an IAM instance profile | `bool` | `false` | no |
+| <a name="input_create_node_iam_role"></a> [create\_node\_iam\_role](#input\_create\_node\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
+| <a name="input_create_pod_identity_association"></a> [create\_pod\_identity\_association](#input\_create\_pod\_identity\_association) | Determines whether to create pod identity association | `bool` | `true` | no |
+| <a name="input_enable_spot_termination"></a> [enable\_spot\_termination](#input\_enable\_spot\_termination) | Determines whether to enable native spot termination handling | `bool` | `true` | no |
+| <a name="input_iam_policy_description"></a> [iam\_policy\_description](#input\_iam\_policy\_description) | IAM policy description | `string` | `"Karpenter controller IAM policy"` | no |
+| <a name="input_iam_policy_name"></a> [iam\_policy\_name](#input\_iam\_policy\_name) | Name of the IAM policy | `string` | `"KarpenterController"` | no |
+| <a name="input_iam_policy_path"></a> [iam\_policy\_path](#input\_iam\_policy\_path) | Path of the IAM policy | `string` | `"/"` | no |
+| <a name="input_iam_policy_statements"></a> [iam\_policy\_statements](#input\_iam\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_iam_policy_use_name_prefix"></a> [iam\_policy\_use\_name\_prefix](#input\_iam\_policy\_use\_name\_prefix) | Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_iam_role_description"></a> [iam\_role\_description](#input\_iam\_role\_description) | IAM role description | `string` | `"Karpenter controller IAM role"` | no |
+| <a name="input_iam_role_max_session_duration"></a> [iam\_role\_max\_session\_duration](#input\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no |
+| <a name="input_iam_role_name"></a> [iam\_role\_name](#input\_iam\_role\_name) | Name of the IAM role | `string` | `"KarpenterController"` | no |
+| <a name="input_iam_role_override_assume_policy_documents"></a> [iam\_role\_override\_assume\_policy\_documents](#input\_iam\_role\_override\_assume\_policy\_documents) | A list of IAM policy documents to override the default assume role policy document for the Karpenter controller IAM role | `list(string)` | `[]` | no |
+| <a name="input_iam_role_path"></a> [iam\_role\_path](#input\_iam\_role\_path) | Path of the IAM role | `string` | `"/"` | no |
+| <a name="input_iam_role_permissions_boundary_arn"></a> [iam\_role\_permissions\_boundary\_arn](#input\_iam\_role\_permissions\_boundary\_arn) | Permissions boundary ARN to use for the IAM role | `string` | `null` | no |
+| <a name="input_iam_role_policies"></a> [iam\_role\_policies](#input\_iam\_role\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no |
+| <a name="input_iam_role_source_assume_policy_documents"></a> [iam\_role\_source\_assume\_policy\_documents](#input\_iam\_role\_source\_assume\_policy\_documents) | A list of IAM policy documents to use as a source for the assume role policy document for the Karpenter controller IAM role | `list(string)` | `[]` | no |
+| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add the the IAM role | `map(string)` | `{}` | no |
+| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_namespace"></a> [namespace](#input\_namespace) | Namespace to associate with the Karpenter Pod Identity | `string` | `"kube-system"` | no |
+| <a name="input_node_iam_role_additional_policies"></a> [node\_iam\_role\_additional\_policies](#input\_node\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
+| <a name="input_node_iam_role_arn"></a> [node\_iam\_role\_arn](#input\_node\_iam\_role\_arn) | Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
+| <a name="input_node_iam_role_attach_cni_policy"></a> [node\_iam\_role\_attach\_cni\_policy](#input\_node\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| <a name="input_node_iam_role_description"></a> [node\_iam\_role\_description](#input\_node\_iam\_role\_description) | Description of the role | `string` | `null` | no |
+| <a name="input_node_iam_role_max_session_duration"></a> [node\_iam\_role\_max\_session\_duration](#input\_node\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no |
+| <a name="input_node_iam_role_name"></a> [node\_iam\_role\_name](#input\_node\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| <a name="input_node_iam_role_path"></a> [node\_iam\_role\_path](#input\_node\_iam\_role\_path) | IAM role path | `string` | `"/"` | no |
+| <a name="input_node_iam_role_permissions_boundary"></a> [node\_iam\_role\_permissions\_boundary](#input\_node\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| <a name="input_node_iam_role_tags"></a> [node\_iam\_role\_tags](#input\_node\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| <a name="input_node_iam_role_use_name_prefix"></a> [node\_iam\_role\_use\_name\_prefix](#input\_node\_iam\_role\_use\_name\_prefix) | Determines whether the Node IAM role name (`node_iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_queue_kms_data_key_reuse_period_seconds"></a> [queue\_kms\_data\_key\_reuse\_period\_seconds](#input\_queue\_kms\_data\_key\_reuse\_period\_seconds) | The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again | `number` | `null` | no |
+| <a name="input_queue_kms_master_key_id"></a> [queue\_kms\_master\_key\_id](#input\_queue\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK | `string` | `null` | no |
+| <a name="input_queue_managed_sse_enabled"></a> [queue\_managed\_sse\_enabled](#input\_queue\_managed\_sse\_enabled) | Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys | `bool` | `true` | no |
+| <a name="input_queue_name"></a> [queue\_name](#input\_queue\_name) | Name of the SQS queue | `string` | `null` | no |
+| <a name="input_region"></a> [region](#input\_region) | Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration | `string` | `null` | no |
+| <a name="input_rule_name_prefix"></a> [rule\_name\_prefix](#input\_rule\_name\_prefix) | Prefix used for all event bridge rules | `string` | `"Karpenter"` | no |
+| <a name="input_service_account"></a> [service\_account](#input\_service\_account) | Service account to associate with the Karpenter Pod Identity | `string` | `"karpenter"` | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_event_rules"></a> [event\_rules](#output\_event\_rules) | Map of the event rules created and their attributes |
+| <a name="output_iam_role_arn"></a> [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the controller IAM role |
+| <a name="output_iam_role_name"></a> [iam\_role\_name](#output\_iam\_role\_name) | The name of the controller IAM role |
+| <a name="output_iam_role_unique_id"></a> [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the controller IAM role |
+| <a name="output_instance_profile_arn"></a> [instance\_profile\_arn](#output\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile |
+| <a name="output_instance_profile_id"></a> [instance\_profile\_id](#output\_instance\_profile\_id) | Instance profile's ID |
+| <a name="output_instance_profile_name"></a> [instance\_profile\_name](#output\_instance\_profile\_name) | Name of the instance profile |
+| <a name="output_instance_profile_unique"></a> [instance\_profile\_unique](#output\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile |
+| <a name="output_namespace"></a> [namespace](#output\_namespace) | Namespace associated with the Karpenter Pod Identity |
+| <a name="output_node_access_entry_arn"></a> [node\_access\_entry\_arn](#output\_node\_access\_entry\_arn) | Amazon Resource Name (ARN) of the node Access Entry |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | The name of the node IAM role |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the node IAM role |
+| <a name="output_queue_arn"></a> [queue\_arn](#output\_queue\_arn) | The ARN of the SQS queue |
+| <a name="output_queue_name"></a> [queue\_name](#output\_queue\_name) | The name of the created Amazon SQS queue |
+| <a name="output_queue_url"></a> [queue\_url](#output\_queue\_url) | The URL for the created Amazon SQS queue |
+| <a name="output_service_account"></a> [service\_account](#output\_service\_account) | Service Account associated with the Karpenter Pod Identity |
+<!-- END_TF_DOCS -->
diff --git a/modules/karpenter/main.tf b/modules/karpenter/main.tf
new file mode 100644
index 0000000000..7eba2cb5d8
--- /dev/null
+++ b/modules/karpenter/main.tf
@@ -0,0 +1,358 @@
+data "aws_region" "current" {
+  count = var.create ? 1 : 0
+
+  region = var.region
+}
+data "aws_partition" "current" {
+  count = var.create ? 1 : 0
+}
+data "aws_caller_identity" "current" {
+  count = var.create ? 1 : 0
+}
+
+locals {
+  account_id = try(data.aws_caller_identity.current[0].account_id, "")
+  dns_suffix = try(data.aws_partition.current[0].dns_suffix, "")
+  partition  = try(data.aws_partition.current[0].partition, "")
+  region     = try(data.aws_region.current[0].region, "")
+}
+
+################################################################################
+# Karpenter controller IAM Role
+################################################################################
+
+locals {
+  create_iam_role = var.create && var.create_iam_role
+}
+
+data "aws_iam_policy_document" "controller_assume_role" {
+  count = local.create_iam_role ? 1 : 0
+
+  override_policy_documents = var.iam_role_override_assume_policy_documents
+  source_policy_documents   = var.iam_role_source_assume_policy_documents
+
+  # Pod Identity
+  statement {
+    sid = "PodIdentity"
+    actions = [
+      "sts:AssumeRole",
+      "sts:TagSession",
+    ]
+
+    principals {
+      type        = "Service"
+      identifiers = ["pods.eks.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "controller" {
+  count = local.create_iam_role ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : var.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${var.iam_role_name}-" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.controller_assume_role[0].json
+  max_session_duration  = var.iam_role_max_session_duration
+  permissions_boundary  = var.iam_role_permissions_boundary_arn
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+resource "aws_iam_policy" "controller" {
+  count = local.create_iam_role ? 1 : 0
+
+  name        = var.iam_policy_use_name_prefix ? null : var.iam_policy_name
+  name_prefix = var.iam_policy_use_name_prefix ? "${var.iam_policy_name}-" : null
+  path        = var.iam_policy_path
+  description = var.iam_policy_description
+  policy      = data.aws_iam_policy_document.controller[0].json
+
+  tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "controller" {
+  count = local.create_iam_role ? 1 : 0
+
+  role       = aws_iam_role.controller[0].name
+  policy_arn = aws_iam_policy.controller[0].arn
+}
+
+resource "aws_iam_role_policy_attachment" "controller_additional" {
+  for_each = { for k, v in var.iam_role_policies : k => v if local.create_iam_role }
+
+  role       = aws_iam_role.controller[0].name
+  policy_arn = each.value
+}
+
+################################################################################
+# Pod Identity Association
+################################################################################
+
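+# Associates the controller IAM role with the Karpenter service account in the cluster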
+resource "aws_eks_pod_identity_association" "karpenter" {
+  count = local.create_iam_role && var.create_pod_identity_association ? 1 : 0
+
+  region = var.region
+
+  cluster_name    = var.cluster_name
+  namespace       = var.namespace
+  service_account = var.service_account
+  role_arn        = aws_iam_role.controller[0].arn
+
+  tags = var.tags
+}
+
+################################################################################
+# Node Termination Queue
+################################################################################
+
+locals {
+  enable_spot_termination = var.create && var.enable_spot_termination
+
+  queue_name = coalesce(var.queue_name, "Karpenter-${var.cluster_name}")
+}
+
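+# Receives the spot interruption, rebalance, instance state-change, and AWS Health
+# events routed by the EventBridge rules below for Karpenter to consume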
+resource "aws_sqs_queue" "this" {
+  count = local.enable_spot_termination ? 1 : 0
+
+  region = var.region
+
+  name                              = local.queue_name
+  message_retention_seconds         = 300
+  sqs_managed_sse_enabled           = var.queue_managed_sse_enabled ? var.queue_managed_sse_enabled : null
+  kms_master_key_id                 = var.queue_kms_master_key_id
+  kms_data_key_reuse_period_seconds = var.queue_kms_data_key_reuse_period_seconds
+
+  tags = var.tags
+}
+
+data "aws_iam_policy_document" "queue" {
+  count = local.enable_spot_termination ? 1 : 0
+
+  statement {
+    sid       = "SqsWrite"
+    actions   = ["sqs:SendMessage"]
+    resources = [aws_sqs_queue.this[0].arn]
+
+    principals {
+      type = "Service"
+      identifiers = [
+        "events.amazonaws.com",
+        "sqs.amazonaws.com",
+      ]
+    }
+  }
+  statement {
+    sid    = "DenyHTTP"
+    effect = "Deny"
+    actions = [
+      "sqs:*"
+    ]
+    resources = [aws_sqs_queue.this[0].arn]
+    condition {
+      test     = "Bool"
+      variable = "aws:SecureTransport"
+      values = [
+        "false"
+      ]
+    }
+    principals {
+      type = "*"
+      identifiers = [
+        "*"
+      ]
+    }
+  }
+}
+
+resource "aws_sqs_queue_policy" "this" {
+  count = local.enable_spot_termination ? 1 : 0
+
+  region = var.region
+
+  queue_url = aws_sqs_queue.this[0].url
+  policy    = data.aws_iam_policy_document.queue[0].json
+}
+
+################################################################################
+# Node Termination Event Rules
+################################################################################
+
+locals {
+  events = {
+    health_event = {
+      name        = "HealthEvent"
+      description = "Karpenter interrupt - AWS health event"
+      event_pattern = {
+        source      = ["aws.health"]
+        detail-type = ["AWS Health Event"]
+      }
+    }
+    spot_interrupt = {
+      name        = "SpotInterrupt"
+      description = "Karpenter interrupt - EC2 spot instance interruption warning"
+      event_pattern = {
+        source      = ["aws.ec2"]
+        detail-type = ["EC2 Spot Instance Interruption Warning"]
+      }
+    }
+    instance_rebalance = {
+      name        = "InstanceRebalance"
+      description = "Karpenter interrupt - EC2 instance rebalance recommendation"
+      event_pattern = {
+        source      = ["aws.ec2"]
+        detail-type = ["EC2 Instance Rebalance Recommendation"]
+      }
+    }
+    instance_state_change = {
+      name        = "InstanceStateChange"
+      description = "Karpenter interrupt - EC2 instance state-change notification"
+      event_pattern = {
+        source      = ["aws.ec2"]
+        detail-type = ["EC2 Instance State-change Notification"]
+      }
+    }
+  }
+}
+
+resource "aws_cloudwatch_event_rule" "this" {
+  for_each = { for k, v in local.events : k => v if local.enable_spot_termination }
+
+  region = var.region
+
+  name_prefix   = "${var.rule_name_prefix}${each.value.name}-"
+  description   = each.value.description
+  event_pattern = jsonencode(each.value.event_pattern)
+
+  tags = merge(
+    { "ClusterName" : var.cluster_name },
+    var.tags,
+  )
+}
+
+resource "aws_cloudwatch_event_target" "this" {
+  for_each = { for k, v in local.events : k => v if local.enable_spot_termination }
+
+  region = var.region
+
+  rule      = aws_cloudwatch_event_rule.this[each.key].name
+  target_id = "KarpenterInterruptionQueueTarget"
+  arn       = aws_sqs_queue.this[0].arn
+}
+
+################################################################################
+# Node IAM Role
+# This is used by the nodes launched by Karpenter
+################################################################################
+
+locals {
+  create_node_iam_role = var.create && var.create_node_iam_role
+
+  node_iam_role_name          = coalesce(var.node_iam_role_name, "Karpenter-${var.cluster_name}")
+  node_iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
+
+  ipv4_cni_policy = { for k, v in {
+    AmazonEKS_CNI_Policy = "${local.node_iam_role_policy_prefix}/AmazonEKS_CNI_Policy"
+  } : k => v if var.node_iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" }
+  ipv6_cni_policy = { for k, v in {
+    AmazonEKS_CNI_IPv6_Policy = "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy"
+  } : k => v if var.node_iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" }
+}
+
+data "aws_iam_policy_document" "node_assume_role" {
+  count = local.create_node_iam_role ? 1 : 0
+
+  statement {
+    sid     = "EKSNodeAssumeRole"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.${local.dns_suffix}"]
+    }
+  }
+}
+
+resource "aws_iam_role" "node" {
+  count = local.create_node_iam_role ? 1 : 0
+
+  name        = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name
+  name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null
+  path        = var.node_iam_role_path
+  description = var.node_iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.node_assume_role[0].json
+  max_session_duration  = var.node_iam_role_max_session_duration
+  permissions_boundary  = var.node_iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.node_iam_role_tags)
+}
+
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
+resource "aws_iam_role_policy_attachment" "node" {
+  for_each = { for k, v in merge(
+    {
+      AmazonEKSWorkerNodePolicy          = "${local.node_iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy"
+      AmazonEC2ContainerRegistryReadOnly = "${local.node_iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly"
+    },
+    local.ipv4_cni_policy,
+    local.ipv6_cni_policy
+  ) : k => v if local.create_node_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.node[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "node_additional" {
+  for_each = { for k, v in var.node_iam_role_additional_policies : k => v if local.create_node_iam_role }
+
+  policy_arn = each.value
+  role       = aws_iam_role.node[0].name
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+resource "aws_eks_access_entry" "node" {
+  count = var.create && var.create_access_entry ? 1 : 0
+
+  region = var.region
+
+  cluster_name  = var.cluster_name
+  principal_arn = var.create_node_iam_role ? aws_iam_role.node[0].arn : var.node_iam_role_arn
+  type          = var.access_entry_type
+
+  tags = var.tags
+
+  depends_on = [
+    # If we try to add this too quickly, it fails, so we wait
+    aws_sqs_queue_policy.this,
+  ]
+}
+
+################################################################################
+# Node IAM Instance Profile
+# This is used by the nodes launched by Karpenter
+# Starting with Karpenter 0.32 this is no longer required as Karpenter will
+# create the Instance Profile
+################################################################################
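+
+# Illustrative only (not part of the module): prior to Karpenter 0.32, callers could opt
+# in to the module-managed instance profile by setting the input shown below. `cluster_name`
+# and `create_instance_profile` are inputs defined in `variables.tf`; the registry source
+# address is assumed to follow the same convention as the other submodules.
+#
+#   module "karpenter" {
+#     source = "terraform-aws-modules/eks/aws//modules/karpenter"
+#
+#     cluster_name            = "my-cluster"
+#     create_instance_profile = true
+#   }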
+
+locals {
+  external_role_name = try(replace(var.node_iam_role_arn, "/^(.*role/)/", ""), null)
+}
+
+resource "aws_iam_instance_profile" "this" {
+  count = var.create && var.create_instance_profile ? 1 : 0
+
+  name        = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name
+  name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null
+  path        = var.node_iam_role_path
+  role        = var.create_node_iam_role ? aws_iam_role.node[0].name : local.external_role_name
+
+  tags = merge(var.tags, var.node_iam_role_tags)
+}
diff --git a/modules/karpenter/migrations.tf b/modules/karpenter/migrations.tf
new file mode 100644
index 0000000000..b40040f330
--- /dev/null
+++ b/modules/karpenter/migrations.tf
@@ -0,0 +1,77 @@
+################################################################################
+# Migrations: v19.21 -> v20.0
+################################################################################
+
+# Node IAM role
+moved {
+  from = aws_iam_role.this
+  to   = aws_iam_role.node
+}
+
+moved {
+  from = aws_iam_policy.this
+  to   = aws_iam_policy.node
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this
+  to   = aws_iam_role_policy_attachment.node
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.additional
+  to   = aws_iam_role_policy_attachment.node_additional
+}
+
+# Controller IAM role
+moved {
+  from = aws_iam_role.irsa
+  to   = aws_iam_role.controller
+}
+
+moved {
+  from = aws_iam_policy.irsa
+  to   = aws_iam_policy.controller
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.irsa
+  to   = aws_iam_role_policy_attachment.controller
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.irsa_additional
+  to   = aws_iam_role_policy_attachment.controller_additional
+}
+
+# Spelling correction
+moved {
+  from = aws_cloudwatch_event_target.this["spot_interupt"]
+  to   = aws_cloudwatch_event_target.this["spot_interrupt"]
+}
+
+moved {
+  from = aws_cloudwatch_event_rule.this["spot_interupt"]
+  to   = aws_cloudwatch_event_rule.this["spot_interrupt"]
+}
+
+################################################################################
+# Migrations: v20.7 -> v20.8
+################################################################################
+
+# Node IAM role policy attachment
+# Commercial partition only - `moved` does not allow multiple moves to the same target
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEKSWorkerNodePolicy"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEC2ContainerRegistryReadOnly"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEKS_CNI_Policy"]
+}
diff --git a/modules/karpenter/outputs.tf b/modules/karpenter/outputs.tf
new file mode 100644
index 0000000000..a71d47242d
--- /dev/null
+++ b/modules/karpenter/outputs.tf
@@ -0,0 +1,112 @@
+################################################################################
+# Karpenter controller IAM Role
+################################################################################
+
+output "iam_role_name" {
+  description = "The name of the controller IAM role"
+  value       = try(aws_iam_role.controller[0].name, null)
+}
+
+output "iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the controller IAM role"
+  value       = try(aws_iam_role.controller[0].arn, null)
+}
+
+output "iam_role_unique_id" {
+  description = "Stable and unique string identifying the controller IAM role"
+  value       = try(aws_iam_role.controller[0].unique_id, null)
+}
+
+################################################################################
+# Node Termination Queue
+################################################################################
+
+output "queue_arn" {
+  description = "The ARN of the SQS queue"
+  value       = try(aws_sqs_queue.this[0].arn, null)
+}
+
+output "queue_name" {
+  description = "The name of the created Amazon SQS queue"
+  value       = try(aws_sqs_queue.this[0].name, null)
+}
+
+output "queue_url" {
+  description = "The URL for the created Amazon SQS queue"
+  value       = try(aws_sqs_queue.this[0].url, null)
+}
+
+################################################################################
+# Node Termination Event Rules
+################################################################################
+
+output "event_rules" {
+  description = "Map of the event rules created and their attributes"
+  value       = aws_cloudwatch_event_rule.this
+}
+
+################################################################################
+# Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "The name of the node IAM role"
+  value       = try(aws_iam_role.node[0].name, null)
+}
+
+output "node_iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = try(aws_iam_role.node[0].arn, var.node_iam_role_arn)
+}
+
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = try(aws_iam_role.node[0].unique_id, null)
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "node_access_entry_arn" {
+  description = "Amazon Resource Name (ARN) of the node Access Entry"
+  value       = try(aws_eks_access_entry.node[0].access_entry_arn, null)
+}
+
+################################################################################
+# Node IAM Instance Profile
+################################################################################
+
+output "instance_profile_arn" {
+  description = "ARN assigned by AWS to the instance profile"
+  value       = try(aws_iam_instance_profile.this[0].arn, null)
+}
+
+output "instance_profile_id" {
+  description = "Instance profile's ID"
+  value       = try(aws_iam_instance_profile.this[0].id, null)
+}
+
+output "instance_profile_name" {
+  description = "Name of the instance profile"
+  value       = try(aws_iam_instance_profile.this[0].name, null)
+}
+
+output "instance_profile_unique" {
+  description = "Stable and unique string identifying the IAM instance profile"
+  value       = try(aws_iam_instance_profile.this[0].unique_id, null)
+}
+
+################################################################################
+# Pod Identity
+################################################################################
+
+output "namespace" {
+  description = "Namespace associated with the Karpenter Pod Identity"
+  value       = var.namespace
+}
+
+output "service_account" {
+  description = "Service Account associated with the Karpenter Pod Identity"
+  value       = var.service_account
+}
diff --git a/modules/karpenter/policy.tf b/modules/karpenter/policy.tf
new file mode 100644
index 0000000000..16f28fa61d
--- /dev/null
+++ b/modules/karpenter/policy.tf
@@ -0,0 +1,397 @@
+data "aws_iam_policy_document" "controller" {
+  count = local.create_iam_role ? 1 : 0
+
+  statement {
+    sid = "AllowScopedEC2InstanceAccessActions"
+    resources = [
+      "arn:${local.partition}:ec2:${local.region}::image/*",
+      "arn:${local.partition}:ec2:${local.region}::snapshot/*",
+      "arn:${local.partition}:ec2:${local.region}:*:security-group/*",
+      "arn:${local.partition}:ec2:${local.region}:*:subnet/*",
+      "arn:${local.partition}:ec2:${local.region}:*:capacity-reservation/*",
+    ]
+
+    actions = [
+      "ec2:RunInstances",
+      "ec2:CreateFleet"
+    ]
+  }
+
+  statement {
+    sid = "AllowScopedEC2LaunchTemplateAccessActions"
+    resources = [
+      "arn:${local.partition}:ec2:${local.region}:*:launch-template/*"
+    ]
+
+    actions = [
+      "ec2:RunInstances",
+      "ec2:CreateFleet"
+    ]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/karpenter.sh/nodepool"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid = "AllowScopedEC2InstanceActionsWithTags"
+    resources = [
+      "arn:${local.partition}:ec2:${local.region}:*:fleet/*",
+      "arn:${local.partition}:ec2:${local.region}:*:instance/*",
+      "arn:${local.partition}:ec2:${local.region}:*:volume/*",
+      "arn:${local.partition}:ec2:${local.region}:*:network-interface/*",
+      "arn:${local.partition}:ec2:${local.region}:*:launch-template/*",
+      "arn:${local.partition}:ec2:${local.region}:*:spot-instances-request/*",
+    ]
+    actions = [
+      "ec2:RunInstances",
+      "ec2:CreateFleet",
+      "ec2:CreateLaunchTemplate"
+    ]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/eks:eks-cluster-name"
+      values   = [var.cluster_name]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:RequestTag/karpenter.sh/nodepool"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid = "AllowScopedResourceCreationTagging"
+    resources = [
+      "arn:${local.partition}:ec2:${local.region}:*:fleet/*",
+      "arn:${local.partition}:ec2:${local.region}:*:instance/*",
+      "arn:${local.partition}:ec2:${local.region}:*:volume/*",
+      "arn:${local.partition}:ec2:${local.region}:*:network-interface/*",
+      "arn:${local.partition}:ec2:${local.region}:*:launch-template/*",
+      "arn:${local.partition}:ec2:${local.region}:*:spot-instances-request/*",
+    ]
+    actions = ["ec2:CreateTags"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/eks:eks-cluster-name"
+      values   = [var.cluster_name]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "ec2:CreateAction"
+      values = [
+        "RunInstances",
+        "CreateFleet",
+        "CreateLaunchTemplate",
+      ]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:RequestTag/karpenter.sh/nodepool"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid       = "AllowScopedResourceTagging"
+    resources = ["arn:${local.partition}:ec2:${local.region}:*:instance/*"]
+    actions   = ["ec2:CreateTags"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/karpenter.sh/nodepool"
+      values   = ["*"]
+    }
+
+    condition {
+      test     = "StringEqualsIfExists"
+      variable = "aws:RequestTag/eks:eks-cluster-name"
+      values   = [var.cluster_name]
+    }
+
+    condition {
+      test     = "ForAllValues:StringEquals"
+      variable = "aws:TagKeys"
+      values = [
+        "eks:eks-cluster-name",
+        "karpenter.sh/nodeclaim",
+        "Name",
+      ]
+    }
+  }
+
+  statement {
+    sid = "AllowScopedDeletion"
+    resources = [
+      "arn:${local.partition}:ec2:${local.region}:*:instance/*",
+      "arn:${local.partition}:ec2:${local.region}:*:launch-template/*"
+    ]
+
+    actions = [
+      "ec2:TerminateInstances",
+      "ec2:DeleteLaunchTemplate"
+    ]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/karpenter.sh/nodepool"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid       = "AllowRegionalReadActions"
+    resources = ["*"]
+    actions = [
+      "ec2:DescribeAvailabilityZones",
+      "ec2:DescribeImages",
+      "ec2:DescribeInstances",
+      "ec2:DescribeInstanceTypeOfferings",
+      "ec2:DescribeInstanceTypes",
+      "ec2:DescribeLaunchTemplates",
+      "ec2:DescribeSecurityGroups",
+      "ec2:DescribeSpotPriceHistory",
+      "ec2:DescribeSubnets"
+    ]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestedRegion"
+      values   = [local.region]
+    }
+  }
+
+  statement {
+    sid       = "AllowSSMReadActions"
+    resources = coalescelist(var.ami_id_ssm_parameter_arns, ["arn:${local.partition}:ssm:${local.region}::parameter/aws/service/*"])
+    actions   = ["ssm:GetParameter"]
+  }
+
+  statement {
+    sid       = "AllowPricingReadActions"
+    resources = ["*"]
+    actions   = ["pricing:GetProducts"]
+  }
+
+  dynamic "statement" {
+    for_each = local.enable_spot_termination ? [1] : []
+
+    content {
+      sid       = "AllowInterruptionQueueActions"
+      resources = [try(aws_sqs_queue.this[0].arn, null)]
+      actions = [
+        "sqs:DeleteMessage",
+        "sqs:GetQueueUrl",
+        "sqs:ReceiveMessage"
+      ]
+    }
+  }
+
+  statement {
+    sid       = "AllowPassingInstanceRole"
+    resources = var.create_node_iam_role ? [aws_iam_role.node[0].arn] : [var.node_iam_role_arn]
+    actions   = ["iam:PassRole"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "iam:PassedToService"
+      values   = distinct(["ec2.${local.dns_suffix}", "ec2.amazonaws.com"])
+    }
+  }
+
+  statement {
+    sid       = "AllowScopedInstanceProfileCreationActions"
+    resources = ["arn:${local.partition}:iam::${local.account_id}:instance-profile/*"]
+    actions   = ["iam:CreateInstanceProfile"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/eks:eks-cluster-name"
+      values   = [var.cluster_name]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/topology.kubernetes.io/region"
+      values   = [local.region]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid       = "AllowScopedInstanceProfileTagActions"
+    resources = ["arn:${local.partition}:iam::${local.account_id}:instance-profile/*"]
+    actions   = ["iam:TagInstanceProfile"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/topology.kubernetes.io/region"
+      values   = [local.region]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/eks:eks-cluster-name"
+      values   = [var.cluster_name]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:RequestTag/topology.kubernetes.io/region"
+      values   = [local.region]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass"
+      values   = ["*"]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid       = "AllowScopedInstanceProfileActions"
+    resources = ["arn:${local.partition}:iam::${local.account_id}:instance-profile/*"]
+    actions = [
+      "iam:AddRoleToInstanceProfile",
+      "iam:RemoveRoleFromInstanceProfile",
+      "iam:DeleteInstanceProfile"
+    ]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}"
+      values   = ["owned"]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/topology.kubernetes.io/region"
+      values   = [local.region]
+    }
+
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass"
+      values   = ["*"]
+    }
+  }
+
+  statement {
+    sid       = "AllowInstanceProfileReadActions"
+    resources = ["arn:${local.partition}:iam::${local.account_id}:instance-profile/*"]
+    actions   = ["iam:GetInstanceProfile"]
+  }
+
+  statement {
+    sid       = "AllowAPIServerEndpointDiscovery"
+    resources = ["arn:${local.partition}:eks:${local.region}:${local.account_id}:cluster/${var.cluster_name}"]
+    actions   = ["eks:DescribeCluster"]
+  }
+
+  dynamic "statement" {
+    for_each = var.iam_policy_statements != null ? var.iam_policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
diff --git a/modules/karpenter/variables.tf b/modules/karpenter/variables.tf
new file mode 100644
index 0000000000..f0725f6a58
--- /dev/null
+++ b/modules/karpenter/variables.tf
@@ -0,0 +1,317 @@
+variable "create" {
+  description = "Controls if resources should be created (affects nearly all resources)"
+  type        = bool
+  default     = true
+}
+
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
+}
+
+variable "cluster_name" {
+  description = "The name of the EKS cluster"
+  type        = string
+  default     = ""
+}
+
+variable "region" {
+  description = "Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration"
+  type        = string
+  default     = null
+}
+
+################################################################################
+# Karpenter controller IAM Role
+################################################################################
+
+variable "create_iam_role" {
+  description = "Determines whether an IAM role is created"
+  type        = bool
+  default     = true
+}
+
+variable "iam_role_name" {
+  description = "Name of the IAM role"
+  type        = string
+  default     = "KarpenterController"
+}
+
+variable "iam_role_use_name_prefix" {
+  description = "Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "iam_role_path" {
+  description = "Path of the IAM role"
+  type        = string
+  default     = "/"
+}
+
+variable "iam_role_description" {
+  description = "IAM role description"
+  type        = string
+  default     = "Karpenter controller IAM role"
+}
+
+variable "iam_role_max_session_duration" {
+  description = "Maximum API session duration in seconds between 3600 and 43200"
+  type        = number
+  default     = null
+}
+
+variable "iam_role_permissions_boundary_arn" {
+  description = "Permissions boundary ARN to use for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_tags" {
+  description = "A map of additional tags to add the the IAM role"
+  type        = map(string)
+  default     = {}
+}
+
+variable "iam_policy_name" {
+  description = "Name of the IAM policy"
+  type        = string
+  default     = "KarpenterController"
+}
+
+variable "iam_policy_use_name_prefix" {
+  description = "Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "iam_policy_path" {
+  description = "Path of the IAM policy"
+  type        = string
+  default     = "/"
+}
+
+variable "iam_policy_description" {
+  description = "IAM policy description"
+  type        = string
+  default     = "Karpenter controller IAM policy"
+}
+
+variable "iam_role_override_assume_policy_documents" {
+  description = "A list of IAM policy documents to override the default assume role policy document for the Karpenter controller IAM role"
+  type        = list(string)
+  default     = []
+}
+
+variable "iam_role_source_assume_policy_documents" {
+  description = "A list of IAM policy documents to use as a source for the assume role policy document for the Karpenter controller IAM role"
+  type        = list(string)
+  default     = []
+}
+
+variable "iam_policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
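+
+# Illustrative example (placeholder values): a caller could extend the controller policy
+# with an additional statement via the variable above, e.g.
+#
+#   iam_policy_statements = [
+#     {
+#       sid       = "AllowECRAuth"
+#       actions   = ["ecr:GetAuthorizationToken"]
+#       resources = ["*"]
+#     }
+#   ]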
+
+variable "iam_role_policies" {
+  description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format"
+  type        = map(string)
+  default     = {}
+}
+
+variable "ami_id_ssm_parameter_arns" {
+  description = "List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs)"
+  type        = list(string)
+  default     = []
+}
+
+################################################################################
+# Pod Identity Association
+################################################################################
+
+variable "create_pod_identity_association" {
+  description = "Determines whether to create pod identity association"
+  type        = bool
+  default     = true
+}
+
+variable "namespace" {
+  description = "Namespace to associate with the Karpenter Pod Identity"
+  type        = string
+  default     = "kube-system"
+}
+
+variable "service_account" {
+  description = "Service account to associate with the Karpenter Pod Identity"
+  type        = string
+  default     = "karpenter"
+}
+
+################################################################################
+# Node Termination Queue
+################################################################################
+
+variable "enable_spot_termination" {
+  description = "Determines whether to enable native spot termination handling"
+  type        = bool
+  default     = true
+}
+
+variable "queue_name" {
+  description = "Name of the SQS queue"
+  type        = string
+  default     = null
+}
+
+variable "queue_managed_sse_enabled" {
+  description = "Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys"
+  type        = bool
+  default     = true
+}
+
+variable "queue_kms_master_key_id" {
+  description = "The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK"
+  type        = string
+  default     = null
+}
+
+variable "queue_kms_data_key_reuse_period_seconds" {
+  description = "The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again"
+  type        = number
+  default     = null
+}
+
+################################################################################
+# Node IAM Role
+################################################################################
+
+variable "create_node_iam_role" {
+  description = "Determines whether an IAM role is created or to use an existing IAM role"
+  type        = bool
+  default     = true
+}
+
+variable "cluster_ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true`"
+  type        = string
+  default     = "ipv4"
+}
+
+variable "node_iam_role_arn" {
+  description = "Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false`"
+  type        = string
+  default     = null
+}
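+
+# Illustrative example (placeholder ARN): to reuse an existing node IAM role instead of
+# creating one, a caller could set
+#
+#   create_node_iam_role = false
+#   node_iam_role_arn    = "arn:aws:iam::111122223333:role/existing-karpenter-node-role"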
+
+variable "node_iam_role_name" {
+  description = "Name to use on IAM role created"
+  type        = string
+  default     = null
+}
+
+variable "node_iam_role_use_name_prefix" {
+  description = "Determines whether the Node IAM role name (`node_iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "node_iam_role_path" {
+  description = "IAM role path"
+  type        = string
+  default     = "/"
+}
+
+variable "node_iam_role_description" {
+  description = "Description of the role"
+  type        = string
+  default     = null
+}
+
+variable "node_iam_role_max_session_duration" {
+  description = "Maximum API session duration in seconds between 3600 and 43200"
+  type        = number
+  default     = null
+}
+
+variable "node_iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "node_iam_role_attach_cni_policy" {
+  description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+  type        = bool
+  default     = true
+}
+
+variable "node_iam_role_additional_policies" {
+  description = "Additional policies to be added to the IAM role"
+  type        = map(string)
+  default     = {}
+}
+
+variable "node_iam_role_tags" {
+  description = "A map of additional tags to add to the IAM role created"
+  type        = map(string)
+  default     = {}
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+variable "create_access_entry" {
+  description = "Determines whether an access entry is created for the IAM role used by the node IAM role"
+  type        = bool
+  default     = true
+}
+
+variable "access_entry_type" {
+  description = "Type of the access entry. `EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX`"
+  type        = string
+  default     = "EC2_LINUX"
+}
+
+################################################################################
+# Node IAM Instance Profile
+################################################################################
+
+variable "create_instance_profile" {
+  description = "Whether to create an IAM instance profile"
+  type        = bool
+  default     = false
+}
+
+################################################################################
+# Event Bridge Rules
+################################################################################
+
+variable "rule_name_prefix" {
+  description = "Prefix used for all event bridge rules"
+  type        = string
+  default     = "Karpenter"
+}
diff --git a/modules/karpenter/versions.tf b/modules/karpenter/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/modules/karpenter/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md
deleted file mode 100644
index 65fde29883..0000000000
--- a/modules/node_groups/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# eks `node_groups` submodule
-
-Helper submodule to create and manage resources related to `eks_node_groups`.
-
-## Assumptions
-* Designed for use by the parent module and not directly by end users
-
-## Node Groups' IAM Role
-The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module.
-
-`iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent.
-
-## `node_groups` and `node_groups_defaults` keys
-`node_groups_defaults` is a map that can take the below keys. Values will be used if not specified in individual node groups.
-
-`node_groups` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_node_group` name. Inner map can take the below values.
-
-| Name | Description | Type | If unset |
-|------|-------------|:----:|:-----:|
-| additional\_tags | Additional tags to apply to node group | map(string) | Only `var.tags` applied |
-| ami\_release\_version | AMI version of workers | string | Provider default behavior |
-| ami\_type | AMI Type. See Terraform or AWS docs | string | Provider default behavior |
-| capacity\_type | Type of instance capacity to provision. Options are `ON_DEMAND` and `SPOT` | string | Provider default behavior |
-| create_launch_template | Create and use a default launch template | bool |  `false` |
-| desired\_capacity | Desired number of workers | number | `var.workers_group_defaults[asg_desired_capacity]` |
-| disk\_size | Workers' disk size | number | Provider default behavior |
-| disk\_type | Workers' disk type. Require `create_launch_template` to be `true`| number | `gp3` |
-| enable_monitoring | Enables/disables detailed monitoring. Require `create_launch_template` to be `true`| bool | `true` |
-| eni_delete | Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying) | bool | `true` |
-| force\_update\_version | Force version update if existing pods are unable to be drained due to a pod disruption budget issue. | bool | Provider default behavior |
-| iam\_role\_arn | IAM role ARN for workers | string | `var.default_iam_role_arn` |
-| instance\_types | Node group's instance type(s). Multiple types can be specified when `capacity_type="SPOT"`. | list | `[var.workers_group_defaults[instance_type]]` |
-| k8s\_labels | Kubernetes labels | map(string) | No labels applied |
-| key\_name | Key name for workers. Set to empty string to disable remote access | string | `var.workers_group_defaults[key_name]` |
-| kubelet_extra_args | This string is passed directly to kubelet if set. Useful for adding labels or taints. Require `create_launch_template` to be `true`| string | "" |
-| launch_template_id | The id of a aws_launch_template to use | string | No LT used |
-| launch\_template_version | The version of the LT to use | string | none |
-| max\_capacity | Max number of workers | number | `var.workers_group_defaults[asg_max_size]` |
-| min\_capacity | Min number of workers | number | `var.workers_group_defaults[asg_min_size]` |
-| name | Name of the node group. If you don't really need this, we recommend you to use `name_prefix` instead. | string | Will use the autogenerate name prefix |
-| name_prefix | Name prefix of the node group | string | Auto generated |
-| pre_userdata | userdata to pre-append to the default userdata. Require `create_launch_template` to be `true`| string | "" |
-| public_ip | Associate a public ip address with a worker. Require `create_launch_template` to be `true`| string | `false`
-| source\_security\_group\_ids | Source security groups for remote access to workers | list(string) | If key\_name is specified: THE REMOTE ACCESS WILL BE OPENED TO THE WORLD |
-| subnets | Subnets to contain workers | list(string) | `var.workers_group_defaults[subnets]` |
-| version | Kubernetes version | string | Provider default behavior |
-| taints | Kubernetes node taints | list(map) | empty |
-
-<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
-## Requirements
-
-| Name | Version |
-|------|---------|
-| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
-| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.43.0 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.43.0 |
-| <a name="provider_cloudinit"></a> [cloudinit](#provider\_cloudinit) | n/a |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_node_group.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_launch_template.workers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [cloudinit_config.workers_userdata](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | n/a | yes |
-| <a name="input_create_eks"></a> [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no |
-| <a name="input_default_iam_role_arn"></a> [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | n/a | yes |
-| <a name="input_ng_depends_on"></a> [ng\_depends\_on](#input\_ng\_depends\_on) | List of references to other resources this submodule depends on | `any` | `null` | no |
-| <a name="input_node_groups"></a> [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no |
-| <a name="input_node_groups_defaults"></a> [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | n/a | yes |
-| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | n/a | yes |
-| <a name="input_worker_additional_security_group_ids"></a> [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| <a name="input_worker_security_group_id"></a> [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no |
-| <a name="input_workers_group_defaults"></a> [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | n/a | yes |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| <a name="output_aws_auth_roles"></a> [aws\_auth\_roles](#output\_aws\_auth\_roles) | Roles for use in aws-auth ConfigMap |
-| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values |
-<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
diff --git a/modules/node_groups/launch_template.tf b/modules/node_groups/launch_template.tf
deleted file mode 100644
index bdd05b3a2c..0000000000
--- a/modules/node_groups/launch_template.tf
+++ /dev/null
@@ -1,112 +0,0 @@
-data "cloudinit_config" "workers_userdata" {
-  for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
-  gzip          = false
-  base64_encode = true
-  boundary      = "//"
-
-  part {
-    content_type = "text/x-shellscript"
-    content = templatefile("${path.module}/templates/userdata.sh.tpl",
-      {
-        pre_userdata       = each.value["pre_userdata"]
-        kubelet_extra_args = each.value["kubelet_extra_args"]
-      }
-    )
-  }
-}
-
-# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
-# there are several more options one could set but you probably dont need to modify them
-# you can take the default and add your custom AMI and/or custom tags
-#
-# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
-# then the default user-data for bootstrapping a cluster is merged in the copy.
-resource "aws_launch_template" "workers" {
-  for_each = { for k, v in local.node_groups_expanded : k => v if v["create_launch_template"] }
-
-  name_prefix            = local.node_groups_names[each.key]
-  description            = format("EKS Managed Node Group custom LT for %s", local.node_groups_names[each.key])
-  update_default_version = true
-
-  block_device_mappings {
-    device_name = "/dev/xvda"
-
-    ebs {
-      volume_size           = lookup(each.value, "disk_size", null)
-      volume_type           = lookup(each.value, "disk_type", null)
-      delete_on_termination = true
-    }
-  }
-
-  instance_type = each.value["set_instance_types_on_lt"] ? element(each.value.instance_types, 0) : null
-
-  monitoring {
-    enabled = lookup(each.value, "enable_monitoring", null)
-  }
-
-  network_interfaces {
-    associate_public_ip_address = lookup(each.value, "public_ip", null)
-    delete_on_termination       = lookup(each.value, "eni_delete", null)
-    security_groups = flatten([
-      var.worker_security_group_id,
-      var.worker_additional_security_group_ids,
-      lookup(
-        each.value,
-        "additional_security_group_ids",
-        null,
-      ),
-    ])
-  }
-
-  # if you want to use a custom AMI
-  # image_id      = var.ami_id
-
-  # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
-  # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
-  #
-  # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
-
-  user_data = data.cloudinit_config.workers_userdata[each.key].rendered
-
-  key_name = lookup(each.value, "key_name", null)
-
-  # Supplying custom tags to EKS instances is another use-case for LaunchTemplates
-  tag_specifications {
-    resource_type = "instance"
-
-    tags = merge(
-      var.tags,
-      lookup(var.node_groups_defaults, "additional_tags", {}),
-      lookup(var.node_groups[each.key], "additional_tags", {}),
-      {
-        Name = local.node_groups_names[each.key]
-      }
-    )
-  }
-
-  # Supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
-  tag_specifications {
-    resource_type = "volume"
-
-    tags = merge(
-      var.tags,
-      lookup(var.node_groups_defaults, "additional_tags", {}),
-      lookup(var.node_groups[each.key], "additional_tags", {}),
-      {
-        Name = local.node_groups_names[each.key]
-      }
-    )
-  }
-
-  # Tag the LT itself
-  tags = merge(
-    var.tags,
-    lookup(var.node_groups_defaults, "additional_tags", {}),
-    lookup(var.node_groups[each.key], "additional_tags", {}),
-  )
-
-  lifecycle {
-    create_before_destroy = true
-  }
-}
diff --git a/modules/node_groups/locals.tf b/modules/node_groups/locals.tf
deleted file mode 100644
index ee026c86d7..0000000000
--- a/modules/node_groups/locals.tf
+++ /dev/null
@@ -1,39 +0,0 @@
-locals {
-  # Merge defaults and per-group values to make code cleaner
-  node_groups_expanded = { for k, v in var.node_groups : k => merge(
-    {
-      desired_capacity              = var.workers_group_defaults["asg_desired_capacity"]
-      iam_role_arn                  = var.default_iam_role_arn
-      instance_types                = [var.workers_group_defaults["instance_type"]]
-      key_name                      = var.workers_group_defaults["key_name"]
-      launch_template_id            = var.workers_group_defaults["launch_template_id"]
-      launch_template_version       = var.workers_group_defaults["launch_template_version"]
-      set_instance_types_on_lt      = false
-      max_capacity                  = var.workers_group_defaults["asg_max_size"]
-      min_capacity                  = var.workers_group_defaults["asg_min_size"]
-      subnets                       = var.workers_group_defaults["subnets"]
-      create_launch_template        = false
-      kubelet_extra_args            = var.workers_group_defaults["kubelet_extra_args"]
-      disk_size                     = var.workers_group_defaults["root_volume_size"]
-      disk_type                     = var.workers_group_defaults["root_volume_type"]
-      enable_monitoring             = var.workers_group_defaults["enable_monitoring"]
-      eni_delete                    = var.workers_group_defaults["eni_delete"]
-      public_ip                     = var.workers_group_defaults["public_ip"]
-      pre_userdata                  = var.workers_group_defaults["pre_userdata"]
-      additional_security_group_ids = var.workers_group_defaults["additional_security_group_ids"]
-      taints                        = []
-    },
-    var.node_groups_defaults,
-    v,
-  ) if var.create_eks }
-
-  node_groups_names = { for k, v in local.node_groups_expanded : k => lookup(
-    v,
-    "name",
-    lookup(
-      v,
-      "name_prefix",
-      join("-", [var.cluster_name, k])
-    )
-  ) }
-}
diff --git a/modules/node_groups/node_groups.tf b/modules/node_groups/node_groups.tf
deleted file mode 100644
index 6a1e3938e3..0000000000
--- a/modules/node_groups/node_groups.tf
+++ /dev/null
@@ -1,89 +0,0 @@
-resource "aws_eks_node_group" "workers" {
-  for_each = local.node_groups_expanded
-
-  node_group_name_prefix = lookup(each.value, "name", null) == null ? local.node_groups_names[each.key] : null
-  node_group_name        = lookup(each.value, "name", null)
-
-  cluster_name  = var.cluster_name
-  node_role_arn = each.value["iam_role_arn"]
-  subnet_ids    = each.value["subnets"]
-
-  scaling_config {
-    desired_size = each.value["desired_capacity"]
-    max_size     = each.value["max_capacity"]
-    min_size     = each.value["min_capacity"]
-  }
-
-  ami_type             = lookup(each.value, "ami_type", null)
-  disk_size            = each.value["launch_template_id"] != null || each.value["create_launch_template"] ? null : lookup(each.value, "disk_size", null)
-  instance_types       = !each.value["set_instance_types_on_lt"] ? each.value["instance_types"] : null
-  release_version      = lookup(each.value, "ami_release_version", null)
-  capacity_type        = lookup(each.value, "capacity_type", null)
-  force_update_version = lookup(each.value, "force_update_version", null)
-
-  dynamic "remote_access" {
-    for_each = each.value["key_name"] != "" && each.value["launch_template_id"] == null && !each.value["create_launch_template"] ? [{
-      ec2_ssh_key               = each.value["key_name"]
-      source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
-    }] : []
-
-    content {
-      ec2_ssh_key               = remote_access.value["ec2_ssh_key"]
-      source_security_group_ids = remote_access.value["source_security_group_ids"]
-    }
-  }
-
-  dynamic "launch_template" {
-    for_each = each.value["launch_template_id"] != null ? [{
-      id      = each.value["launch_template_id"]
-      version = each.value["launch_template_version"]
-    }] : []
-
-    content {
-      id      = launch_template.value["id"]
-      version = launch_template.value["version"]
-    }
-  }
-
-  dynamic "launch_template" {
-    for_each = each.value["launch_template_id"] == null && each.value["create_launch_template"] ? [{
-      id      = aws_launch_template.workers[each.key].id
-      version = each.value["launch_template_version"]
-    }] : []
-
-    content {
-      id      = launch_template.value["id"]
-      version = launch_template.value["version"]
-    }
-  }
-
-  dynamic "taint" {
-    for_each = each.value["taints"]
-
-    content {
-      key    = taint.value["key"]
-      value  = taint.value["value"]
-      effect = taint.value["effect"]
-    }
-  }
-
-  version = lookup(each.value, "version", null)
-
-  labels = merge(
-    lookup(var.node_groups_defaults, "k8s_labels", {}),
-    lookup(var.node_groups[each.key], "k8s_labels", {})
-  )
-
-  tags = merge(
-    var.tags,
-    lookup(var.node_groups_defaults, "additional_tags", {}),
-    lookup(var.node_groups[each.key], "additional_tags", {}),
-  )
-
-  lifecycle {
-    create_before_destroy = true
-    ignore_changes        = [scaling_config.0.desired_size]
-  }
-
-  depends_on = [var.ng_depends_on]
-}
diff --git a/modules/node_groups/outputs.tf b/modules/node_groups/outputs.tf
deleted file mode 100644
index ad148ea514..0000000000
--- a/modules/node_groups/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "node_groups" {
-  description = "Outputs from EKS node groups. Map of maps, keyed by `var.node_groups` keys. See `aws_eks_node_group` Terraform documentation for values"
-  value       = aws_eks_node_group.workers
-}
-
-output "aws_auth_roles" {
-  description = "Roles for use in aws-auth ConfigMap"
-  value = [
-    for k, v in local.node_groups_expanded : {
-      worker_role_arn = lookup(v, "iam_role_arn", var.default_iam_role_arn)
-      platform        = "linux"
-    }
-  ]
-}
diff --git a/modules/node_groups/templates/userdata.sh.tpl b/modules/node_groups/templates/userdata.sh.tpl
deleted file mode 100644
index 3aecd0aabb..0000000000
--- a/modules/node_groups/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash -e
-
-# Allow user supplied pre userdata code
-${pre_userdata}
-
-sed -i '/^KUBELET_EXTRA_ARGS=/a KUBELET_EXTRA_ARGS+=" ${kubelet_extra_args}"' /etc/eks/bootstrap.sh
diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf
deleted file mode 100644
index 585beb5f91..0000000000
--- a/modules/node_groups/variables.tf
+++ /dev/null
@@ -1,56 +0,0 @@
-variable "create_eks" {
-  description = "Controls if EKS resources should be created (it affects almost all resources)"
-  type        = bool
-  default     = true
-}
-
-variable "cluster_name" {
-  description = "Name of parent cluster"
-  type        = string
-}
-
-variable "default_iam_role_arn" {
-  description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`"
-  type        = string
-}
-
-variable "workers_group_defaults" {
-  description = "Workers group defaults from parent"
-  type        = any
-}
-
-variable "worker_security_group_id" {
-  description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
-  type        = string
-  default     = ""
-}
-
-variable "worker_additional_security_group_ids" {
-  description = "A list of additional security group ids to attach to worker instances"
-  type        = list(string)
-  default     = []
-}
-
-variable "tags" {
-  description = "A map of tags to add to all resources"
-  type        = map(string)
-}
-
-variable "node_groups_defaults" {
-  description = "map of maps of node groups to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
-  type        = any
-}
-
-variable "node_groups" {
-  description = "Map of maps of `eks_node_groups` to create. See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details"
-  type        = any
-  default     = {}
-}
-
-# Hack for a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
-# Will be removed in Terraform 0.13 with the support of module's `depends_on` https://github.com/hashicorp/terraform/issues/10462
-variable "ng_depends_on" {
-  description = "List of references to other resources this submodule depends on"
-  type        = any
-  default     = null
-}
diff --git a/modules/node_groups/versions.tf b/modules/node_groups/versions.tf
deleted file mode 100644
index c68eb70217..0000000000
--- a/modules/node_groups/versions.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-terraform {
-  required_version = ">= 0.13.1"
-
-  required_providers {
-    aws = ">= 3.43.0"
-  }
-}
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
new file mode 100644
index 0000000000..8b21f20b69
--- /dev/null
+++ b/modules/self-managed-node-group/README.md
@@ -0,0 +1,227 @@
+# Self Managed Node Group Module
+
+Configuration in this directory creates a Self Managed Node Group (AutoScaling Group) along with an IAM role, security group, and launch template
+
+## Usage
+
+```hcl
+module "self_managed_node_group" {
+  source = "terraform-aws-modules/eks/aws//modules/self-managed-node-group"
+
+  name                = "separate-self-mng"
+  cluster_name        = "my-cluster"
+  kubernetes_version  = "1.31"
+  cluster_endpoint    = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+  cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+
+  subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+  // The following variables are necessary if you decide to use the module outside of the parent EKS module context.
+  // Without them, the nodes' security groups are empty and the nodes won't join the cluster.
+  vpc_security_group_ids = [
+    module.eks.cluster_primary_security_group_id,
+    module.eks.cluster_security_group_id,
+  ]
+
+  min_size     = 1
+  max_size     = 10
+  desired_size = 1
+
+  launch_template_name   = "separate-self-mng"
+  instance_type          = "m5.large"
+
+  tags = {
+    Environment = "dev"
+    Terraform   = "true"
+  }
+}
+```
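+
+As a minimal, illustrative sketch, values produced by this module (for example `autoscaling_group_name` and `iam_role_arn`, both listed in the Outputs table below) can be re-exported from the calling configuration; the output names used here are hypothetical:
+
+```hcl
+# Hypothetical root-module outputs wired to the module block shown above
+output "self_managed_node_group_asg_name" {
+  description = "Name of the Auto Scaling Group created for the self managed node group"
+  value       = module.self_managed_node_group.autoscaling_group_name
+}
+
+output "self_managed_node_group_iam_role_arn" {
+  description = "ARN of the IAM role used by the self managed node group"
+  value       = module.self_managed_node_group.iam_role_arn
+}
+```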
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_user_data"></a> [user\_data](#module\_user\_data) | ../_user_data | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource |
+| [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource |
+| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
+| [aws_placement_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/placement_group) | resource |
+| [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_vpc_security_group_egress_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource |
+| [aws_vpc_security_group_ingress_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_ec2_instance_type.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type) | data source |
+| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
+| [aws_ssm_parameter.ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+| [aws_subnet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_account_id"></a> [account\_id](#input\_account\_id) | The AWS account ID - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_additional_cluster_dns_ips"></a> [additional\_cluster\_dns\_ips](#input\_additional\_cluster\_dns\_ips) | Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*` | `list(string)` | `null` | no |
+| <a name="input_ami_id"></a> [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance | `string` | `""` | no |
+| <a name="input_ami_type"></a> [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the node group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `"AL2023_x86_64_STANDARD"` | no |
+| <a name="input_autoscaling_group_tags"></a> [autoscaling\_group\_tags](#input\_autoscaling\_group\_tags) | A map of additional tags to add to the autoscaling group created. Tags are applied to the autoscaling group only and are NOT propagated to instances | `map(string)` | `{}` | no |
+| <a name="input_availability_zones"></a> [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no |
+| <a name="input_block_device_mappings"></a> [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | <pre>map(object({<br/>    device_name = optional(string)<br/>    ebs = optional(object({<br/>      delete_on_termination      = optional(bool)<br/>      encrypted                  = optional(bool)<br/>      iops                       = optional(number)<br/>      kms_key_id                 = optional(string)<br/>      snapshot_id                = optional(string)<br/>      throughput                 = optional(number)<br/>      volume_initialization_rate = optional(number)<br/>      volume_size                = optional(number)<br/>      volume_type                = optional(string)<br/>    }))<br/>    no_device    = optional(string)<br/>    virtual_name = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_bootstrap_extra_args"></a> [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `null` | no |
+| <a name="input_capacity_rebalance"></a> [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no |
+| <a name="input_capacity_reservation_specification"></a> [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | <pre>object({<br/>    capacity_reservation_preference = optional(string)<br/>    capacity_reservation_target = optional(object({<br/>      capacity_reservation_id                 = optional(string)<br/>      capacity_reservation_resource_group_arn = optional(string)<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_cloudinit_post_nodeadm"></a> [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_cloudinit_pre_nodeadm"></a> [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part | <pre>list(object({<br/>    content      = string<br/>    content_type = optional(string)<br/>    filename     = optional(string)<br/>    merge_type   = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_cluster_auth_base64"></a> [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `null` | no |
+| <a name="input_cluster_endpoint"></a> [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `null` | no |
+| <a name="input_cluster_ip_family"></a> [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no |
+| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no |
+| <a name="input_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service | `string` | `null` | no |
+| <a name="input_cluster_service_cidr"></a> [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself | `string` | `null` | no |
+| <a name="input_context"></a> [context](#input\_context) | Reserved | `string` | `null` | no |
+| <a name="input_cpu_options"></a> [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | <pre>object({<br/>    amd_sev_snp      = optional(string)<br/>    core_count       = optional(number)<br/>    threads_per_core = optional(number)<br/>  })</pre> | `null` | no |
+| <a name="input_create"></a> [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no |
+| <a name="input_create_access_entry"></a> [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the node group | `bool` | `true` | no |
+| <a name="input_create_autoscaling_group"></a> [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
+| <a name="input_create_iam_instance_profile"></a> [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
+| <a name="input_create_iam_role_policy"></a> [create\_iam\_role\_policy](#input\_create\_iam\_role\_policy) | Determines whether an IAM role policy is created or not | `bool` | `true` | no |
+| <a name="input_create_launch_template"></a> [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
+| <a name="input_create_placement_group"></a> [create\_placement\_group](#input\_create\_placement\_group) | Determines whether a placement group is created & used by the node group | `bool` | `false` | no |
+| <a name="input_create_security_group"></a> [create\_security\_group](#input\_create\_security\_group) | Determines if a security group is created | `bool` | `true` | no |
+| <a name="input_credit_specification"></a> [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | <pre>object({<br/>    cpu_credits = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_default_instance_warmup"></a> [default\_instance\_warmup](#input\_default\_instance\_warmup) | Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data | `number` | `null` | no |
+| <a name="input_desired_size"></a> [desired\_size](#input\_desired\_size) | The number of Amazon EC2 instances that should be running in the autoscaling group | `number` | `1` | no |
+| <a name="input_desired_size_type"></a> [desired\_size\_type](#input\_desired\_size\_type) | The unit of measurement for the value specified for `desired_size`. Supported for attribute-based instance type selection only. Valid values: `units`, `vcpu`, `memory-mib` | `string` | `null` | no |
+| <a name="input_disable_api_termination"></a> [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
+| <a name="input_ebs_optimized"></a> [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no |
+| <a name="input_efa_indices"></a> [efa\_indices](#input\_efa\_indices) | The indices of the network interfaces that should be EFA-enabled. Only valid when `enable_efa_support` = `true` | `list(number)` | <pre>[<br/>  0<br/>]</pre> | no |
+| <a name="input_enable_efa_only"></a> [enable\_efa\_only](#input\_enable\_efa\_only) | Determines whether to enable EFA (`false`, default) or EFA and EFA-only (`true`) network interfaces. Note: requires vpc-cni version `v1.18.4` or later | `bool` | `true` | no |
+| <a name="input_enable_efa_support"></a> [enable\_efa\_support](#input\_enable\_efa\_support) | Determines whether to enable Elastic Fabric Adapter (EFA) support | `bool` | `false` | no |
+| <a name="input_enable_monitoring"></a> [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `false` | no |
+| <a name="input_enabled_metrics"></a> [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `[]` | no |
+| <a name="input_enclave_options"></a> [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | <pre>object({<br/>    enabled = optional(bool)<br/>  })</pre> | `null` | no |
+| <a name="input_force_delete"></a> [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
+| <a name="input_health_check_grace_period"></a> [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
+| <a name="input_health_check_type"></a> [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
+| <a name="input_iam_instance_profile_arn"></a> [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no |
+| <a name="input_iam_role_additional_policies"></a> [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
+| <a name="input_iam_role_arn"></a> [iam\_role\_arn](#input\_iam\_role\_arn) | ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false` | `string` | `null` | no |
+| <a name="input_iam_role_attach_cni_policy"></a> [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no |
+| <a name="input_iam_role_description"></a> [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `"Self managed node group IAM role"` | no |
+| <a name="input_iam_role_name"></a> [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
+| <a name="input_iam_role_path"></a> [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `null` | no |
+| <a name="input_iam_role_permissions_boundary"></a> [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
+| <a name="input_iam_role_policy_statements"></a> [iam\_role\_policy\_statements](#input\_iam\_role\_policy\_statements) | A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed | <pre>list(object({<br/>    sid           = optional(string)<br/>    actions       = optional(list(string))<br/>    not_actions   = optional(list(string))<br/>    effect        = optional(string)<br/>    resources     = optional(list(string))<br/>    not_resources = optional(list(string))<br/>    principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    not_principals = optional(list(object({<br/>      type        = string<br/>      identifiers = list(string)<br/>    })))<br/>    condition = optional(list(object({<br/>      test     = string<br/>      values   = list(string)<br/>      variable = string<br/>    })))<br/>  }))</pre> | `null` | no |
+| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
+| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_ignore_failed_scaling_activities"></a> [ignore\_failed\_scaling\_activities](#input\_ignore\_failed\_scaling\_activities) | Whether to ignore failed Auto Scaling scaling activities while waiting for capacity | `bool` | `null` | no |
+| <a name="input_initial_lifecycle_hooks"></a> [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | <pre>list(object({<br/>    default_result          = optional(string)<br/>    heartbeat_timeout       = optional(number)<br/>    lifecycle_transition    = string<br/>    name                    = string<br/>    notification_metadata   = optional(string)<br/>    notification_target_arn = optional(string)<br/>    role_arn                = optional(string)<br/>  }))</pre> | `null` | no |
+| <a name="input_instance_initiated_shutdown_behavior"></a> [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no |
+| <a name="input_instance_maintenance_policy"></a> [instance\_maintenance\_policy](#input\_instance\_maintenance\_policy) | If this block is configured, add a instance maintenance policy to the specified Auto Scaling group | <pre>object({<br/>    max_healthy_percentage = number<br/>    min_healthy_percentage = number<br/>  })</pre> | `null` | no |
+| <a name="input_instance_market_options"></a> [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | <pre>object({<br/>    market_type = optional(string)<br/>    spot_options = optional(object({<br/>      block_duration_minutes         = optional(number)<br/>      instance_interruption_behavior = optional(string)<br/>      max_price                      = optional(string)<br/>      spot_instance_type             = optional(string)<br/>      valid_until                    = optional(string)<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_instance_refresh"></a> [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | <pre>object({<br/>    preferences = optional(object({<br/>      alarm_specification = optional(object({<br/>        alarms = optional(list(string))<br/>      }))<br/>      auto_rollback                = optional(bool)<br/>      checkpoint_delay             = optional(number)<br/>      checkpoint_percentages       = optional(list(number))<br/>      instance_warmup              = optional(number)<br/>      max_healthy_percentage       = optional(number)<br/>      min_healthy_percentage       = optional(number, 33)<br/>      scale_in_protected_instances = optional(string)<br/>      skip_matching                = optional(bool)<br/>      standby_instances            = optional(string)<br/>    }))<br/>    strategy = optional(string, "Rolling")<br/>    triggers = optional(list(string))<br/>  })</pre> | <pre>{<br/>  "preferences": {<br/>    "min_healthy_percentage": 66<br/>  },<br/>  "strategy": "Rolling"<br/>}</pre> | no |
+| <a name="input_instance_requirements"></a> [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | <pre>object({<br/>    accelerator_count = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    accelerator_manufacturers = optional(list(string))<br/>    accelerator_names         = optional(list(string))<br/>    accelerator_total_memory_mib = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    accelerator_types      = optional(list(string))<br/>    allowed_instance_types = optional(list(string))<br/>    bare_metal             = optional(string)<br/>    baseline_ebs_bandwidth_mbps = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    burstable_performance                                   = optional(string)<br/>    cpu_manufacturers                                       = optional(list(string))<br/>    excluded_instance_types                                 = optional(list(string))<br/>    instance_generations                                    = optional(list(string))<br/>    local_storage                                           = optional(string)<br/>    local_storage_types                                     = optional(list(string))<br/>    max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)<br/>    memory_gib_per_vcpu = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    memory_mib = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    network_bandwidth_gbps = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    network_interface_count = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    on_demand_max_price_percentage_over_lowest_price = optional(number)<br/>    require_hibernate_support                        = optional(bool)<br/>    spot_max_price_percentage_over_lowest_price      = optional(number)<br/>    total_local_storage_gb = optional(object({<br/>      max = optional(number)<br/>      min = optional(number)<br/>    }))<br/>    vcpu_count = optional(object({<br/>      max = optional(number)<br/>      min = string<br/>    }))<br/>  })</pre> | `null` | no |
+| <a name="input_instance_type"></a> [instance\_type](#input\_instance\_type) | The type of the instance to launch | `string` | `"m6i.large"` | no |
+| <a name="input_kernel_id"></a> [kernel\_id](#input\_kernel\_id) | The kernel ID | `string` | `null` | no |
+| <a name="input_key_name"></a> [key\_name](#input\_key\_name) | The key name that should be used for the instance | `string` | `null` | no |
+| <a name="input_kubernetes_version"></a> [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no |
+| <a name="input_launch_template_default_version"></a> [launch\_template\_default\_version](#input\_launch\_template\_default\_version) | Default Version of the launch template | `string` | `null` | no |
+| <a name="input_launch_template_description"></a> [launch\_template\_description](#input\_launch\_template\_description) | Description of the launch template | `string` | `null` | no |
+| <a name="input_launch_template_id"></a> [launch\_template\_id](#input\_launch\_template\_id) | The ID of an existing launch template to use. Required when `create_launch_template` = `false` | `string` | `""` | no |
+| <a name="input_launch_template_name"></a> [launch\_template\_name](#input\_launch\_template\_name) | Name of launch template to be created | `string` | `null` | no |
+| <a name="input_launch_template_tags"></a> [launch\_template\_tags](#input\_launch\_template\_tags) | A map of additional tags to add to the tag\_specifications of launch template created | `map(string)` | `{}` | no |
+| <a name="input_launch_template_use_name_prefix"></a> [launch\_template\_use\_name\_prefix](#input\_launch\_template\_use\_name\_prefix) | Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix | `bool` | `true` | no |
+| <a name="input_launch_template_version"></a> [launch\_template\_version](#input\_launch\_template\_version) | Launch template version. Can be version number, `$Latest`, or `$Default` | `string` | `null` | no |
+| <a name="input_license_specifications"></a> [license\_specifications](#input\_license\_specifications) | A list of license specifications to associate with | <pre>list(object({<br/>    license_configuration_arn = string<br/>  }))</pre> | `null` | no |
+| <a name="input_maintenance_options"></a> [maintenance\_options](#input\_maintenance\_options) | The maintenance options for the instance | <pre>object({<br/>    auto_recovery = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_max_instance_lifetime"></a> [max\_instance\_lifetime](#input\_max\_instance\_lifetime) | The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds | `number` | `null` | no |
+| <a name="input_max_size"></a> [max\_size](#input\_max\_size) | The maximum size of the autoscaling group | `number` | `3` | no |
+| <a name="input_metadata_options"></a> [metadata\_options](#input\_metadata\_options) | Customize the metadata options for the instance | <pre>object({<br/>    http_endpoint               = optional(string, "enabled")<br/>    http_protocol_ipv6          = optional(string)<br/>    http_put_response_hop_limit = optional(number, 1)<br/>    http_tokens                 = optional(string, "required")<br/>    instance_metadata_tags      = optional(string)<br/>  })</pre> | <pre>{<br/>  "http_endpoint": "enabled",<br/>  "http_put_response_hop_limit": 1,<br/>  "http_tokens": "required"<br/>}</pre> | no |
+| <a name="input_metrics_granularity"></a> [metrics\_granularity](#input\_metrics\_granularity) | The granularity to associate with the metrics to collect. The only valid value is `1Minute` | `string` | `null` | no |
+| <a name="input_min_size"></a> [min\_size](#input\_min\_size) | The minimum size of the autoscaling group | `number` | `1` | no |
+| <a name="input_mixed_instances_policy"></a> [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Configuration block containing settings to define launch targets for Auto Scaling groups | <pre>object({<br/>    instances_distribution = optional(object({<br/>      on_demand_allocation_strategy            = optional(string)<br/>      on_demand_base_capacity                  = optional(number)<br/>      on_demand_percentage_above_base_capacity = optional(number)<br/>      spot_allocation_strategy                 = optional(string)<br/>      spot_instance_pools                      = optional(number)<br/>      spot_max_price                           = optional(string)<br/>    }))<br/>    launch_template = object({<br/>      override = optional(list(object({<br/>        instance_requirements = optional(object({<br/>          accelerator_count = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          accelerator_manufacturers = optional(list(string))<br/>          accelerator_names         = optional(list(string))<br/>          accelerator_total_memory_mib = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          accelerator_types      = optional(list(string))<br/>          allowed_instance_types = optional(list(string))<br/>          bare_metal             = optional(string)<br/>          baseline_ebs_bandwidth_mbps = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          burstable_performance                                   = optional(string)<br/>          cpu_manufacturers                                       = optional(list(string))<br/>          excluded_instance_types                                 = optional(list(string))<br/>          instance_generations                                    = optional(list(string))<br/>          local_storage                                           = optional(string)<br/>          local_storage_types                                     = optional(list(string))<br/>          max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)<br/>          memory_gib_per_vcpu = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          memory_mib = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          network_bandwidth_gbps = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          network_interface_count = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          on_demand_max_price_percentage_over_lowest_price = optional(number)<br/>          require_hibernate_support                        = optional(bool)<br/>          spot_max_price_percentage_over_lowest_price      = optional(number)<br/>          total_local_storage_gb = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>          vcpu_count = optional(object({<br/>            max = optional(number)<br/>            min = optional(number)<br/>          }))<br/>        }))<br/>        instance_type = optional(string)<br/>        launch_template_specification = optional(object({<br/>          launch_template_id   = 
optional(string)<br/>          launch_template_name = optional(string)<br/>          version              = optional(string)<br/>        }))<br/>        weighted_capacity = optional(string)<br/>      })))<br/>    })<br/>  })</pre> | `null` | no |
+| <a name="input_name"></a> [name](#input\_name) | Name of the Self managed Node Group | `string` | `""` | no |
+| <a name="input_network_interfaces"></a> [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | <pre>list(object({<br/>    associate_carrier_ip_address = optional(bool)<br/>    associate_public_ip_address  = optional(bool)<br/>    connection_tracking_specification = optional(object({<br/>      tcp_established_timeout = optional(number)<br/>      udp_stream_timeout      = optional(number)<br/>      udp_timeout             = optional(number)<br/>    }))<br/>    delete_on_termination = optional(bool)<br/>    description           = optional(string)<br/>    device_index          = optional(number)<br/>    ena_srd_specification = optional(object({<br/>      ena_srd_enabled = optional(bool)<br/>      ena_srd_udp_specification = optional(object({<br/>        ena_srd_udp_enabled = optional(bool)<br/>      }))<br/>    }))<br/>    interface_type       = optional(string)<br/>    ipv4_address_count   = optional(number)<br/>    ipv4_addresses       = optional(list(string))<br/>    ipv4_prefix_count    = optional(number)<br/>    ipv4_prefixes        = optional(list(string))<br/>    ipv6_address_count   = optional(number)<br/>    ipv6_addresses       = optional(list(string))<br/>    ipv6_prefix_count    = optional(number)<br/>    ipv6_prefixes        = optional(list(string))<br/>    network_card_index   = optional(number)<br/>    network_interface_id = optional(string)<br/>    primary_ipv6         = optional(bool)<br/>    private_ip_address   = optional(string)<br/>    security_groups      = optional(list(string), [])<br/>    subnet_id            = optional(string)<br/>  }))</pre> | `[]` | no |
+| <a name="input_partition"></a> [partition](#input\_partition) | The AWS partition - pass through value to reduce number of GET requests from data sources | `string` | `""` | no |
+| <a name="input_placement"></a> [placement](#input\_placement) | The placement of the instance | <pre>object({<br/>    affinity                = optional(string)<br/>    availability_zone       = optional(string)<br/>    group_name              = optional(string)<br/>    host_id                 = optional(string)<br/>    host_resource_group_arn = optional(string)<br/>    partition_number        = optional(number)<br/>    spread_domain           = optional(string)<br/>    tenancy                 = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_placement_group"></a> [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances | `string` | `null` | no |
+| <a name="input_post_bootstrap_user_data"></a> [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `null` | no |
+| <a name="input_pre_bootstrap_user_data"></a> [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `null` | no |
+| <a name="input_private_dns_name_options"></a> [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | <pre>object({<br/>    enable_resource_name_dns_aaaa_record = optional(bool)<br/>    enable_resource_name_dns_a_record    = optional(bool)<br/>    hostname_type                        = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_protect_from_scale_in"></a> [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events | `bool` | `false` | no |
+| <a name="input_ram_disk_id"></a> [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
+| <a name="input_region"></a> [region](#input\_region) | Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration | `string` | `null` | no |
+| <a name="input_security_group_description"></a> [security\_group\_description](#input\_security\_group\_description) | Description of the security group created | `string` | `null` | no |
+| <a name="input_security_group_egress_rules"></a> [security\_group\_egress\_rules](#input\_security\_group\_egress\_rules) | Security group egress rules to add to the security group created | <pre>map(object({<br/>    name = optional(string)<br/><br/>    cidr_ipv4                    = optional(string)<br/>    cidr_ipv6                    = optional(string)<br/>    description                  = optional(string)<br/>    from_port                    = optional(string)<br/>    ip_protocol                  = optional(string, "tcp")<br/>    prefix_list_id               = optional(string)<br/>    referenced_security_group_id = optional(string)<br/>    self                         = optional(bool, false)<br/>    tags                         = optional(map(string), {})<br/>    to_port                      = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_security_group_ingress_rules"></a> [security\_group\_ingress\_rules](#input\_security\_group\_ingress\_rules) | Security group ingress rules to add to the security group created | <pre>map(object({<br/>    name = optional(string)<br/><br/>    cidr_ipv4                    = optional(string)<br/>    cidr_ipv6                    = optional(string)<br/>    description                  = optional(string)<br/>    from_port                    = optional(string)<br/>    ip_protocol                  = optional(string, "tcp")<br/>    prefix_list_id               = optional(string)<br/>    referenced_security_group_id = optional(string)<br/>    self                         = optional(bool, false)<br/>    tags                         = optional(map(string), {})<br/>    to_port                      = optional(string)<br/>  }))</pre> | `{}` | no |
+| <a name="input_security_group_name"></a> [security\_group\_name](#input\_security\_group\_name) | Name to use on security group created | `string` | `null` | no |
+| <a name="input_security_group_tags"></a> [security\_group\_tags](#input\_security\_group\_tags) | A map of additional tags to add to the security group created | `map(string)` | `{}` | no |
+| <a name="input_security_group_use_name_prefix"></a> [security\_group\_use\_name\_prefix](#input\_security\_group\_use\_name\_prefix) | Determines whether the security group name (`security_group_name`) is used as a prefix | `bool` | `true` | no |
+| <a name="input_subnet_ids"></a> [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones` | `list(string)` | `null` | no |
+| <a name="input_suspended_processes"></a> [suspended\_processes](#input\_suspended\_processes) | A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly | `list(string)` | `[]` | no |
+| <a name="input_tag_specifications"></a> [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(string)` | <pre>[<br/>  "instance",<br/>  "volume",<br/>  "network-interface"<br/>]</pre> | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
+| <a name="input_termination_policies"></a> [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default` | `list(string)` | `[]` | no |
+| <a name="input_timeouts"></a> [timeouts](#input\_timeouts) | Timeout configurations for the autoscaling group | <pre>object({<br/>    delete = optional(string)<br/>  })</pre> | `null` | no |
+| <a name="input_update_launch_template_default_version"></a> [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update Default Version each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no |
+| <a name="input_use_mixed_instances_policy"></a> [use\_mixed\_instances\_policy](#input\_use\_mixed\_instances\_policy) | Determines whether to use a mixed instances policy in the autoscaling group or not | `bool` | `false` | no |
+| <a name="input_use_name_prefix"></a> [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no |
+| <a name="input_user_data_template_path"></a> [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `null` | no |
+| <a name="input_vpc_security_group_ids"></a> [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_access_entry_arn"></a> [access\_entry\_arn](#output\_access\_entry\_arn) | Amazon Resource Name (ARN) of the Access Entry |
+| <a name="output_autoscaling_group_arn"></a> [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this autoscaling group |
+| <a name="output_autoscaling_group_availability_zones"></a> [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscaling group |
+| <a name="output_autoscaling_group_default_cooldown"></a> [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity |
+| <a name="output_autoscaling_group_desired_capacity"></a> [autoscaling\_group\_desired\_capacity](#output\_autoscaling\_group\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group |
+| <a name="output_autoscaling_group_health_check_grace_period"></a> [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health |
+| <a name="output_autoscaling_group_health_check_type"></a> [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | EC2 or ELB. Controls how health checking is done |
+| <a name="output_autoscaling_group_id"></a> [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id |
+| <a name="output_autoscaling_group_max_size"></a> [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscaling group |
+| <a name="output_autoscaling_group_min_size"></a> [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscaling group |
+| <a name="output_autoscaling_group_name"></a> [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name |
+| <a name="output_autoscaling_group_vpc_zone_identifier"></a> [autoscaling\_group\_vpc\_zone\_identifier](#output\_autoscaling\_group\_vpc\_zone\_identifier) | The VPC zone identifier |
+| <a name="output_iam_instance_profile_arn"></a> [iam\_instance\_profile\_arn](#output\_iam\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile |
+| <a name="output_iam_instance_profile_id"></a> [iam\_instance\_profile\_id](#output\_iam\_instance\_profile\_id) | Instance profile's ID |
+| <a name="output_iam_instance_profile_unique"></a> [iam\_instance\_profile\_unique](#output\_iam\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile |
+| <a name="output_iam_role_arn"></a> [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
+| <a name="output_iam_role_name"></a> [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
+| <a name="output_iam_role_unique_id"></a> [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_image_id"></a> [image\_id](#output\_image\_id) | ID of the image |
+| <a name="output_launch_template_arn"></a> [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template |
+| <a name="output_launch_template_id"></a> [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template |
+| <a name="output_launch_template_latest_version"></a> [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template |
+| <a name="output_launch_template_name"></a> [launch\_template\_name](#output\_launch\_template\_name) | The name of the launch template |
+| <a name="output_security_group_arn"></a> [security\_group\_arn](#output\_security\_group\_arn) | Amazon Resource Name (ARN) of the security group |
+| <a name="output_security_group_id"></a> [security\_group\_id](#output\_security\_group\_id) | ID of the security group |
+| <a name="output_user_data"></a> [user\_data](#output\_user\_data) | Base64 encoded user data |
+<!-- END_TF_DOCS -->
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
new file mode 100644
index 0000000000..b30476cbdf
--- /dev/null
+++ b/modules/self-managed-node-group/main.tf
@@ -0,0 +1,1072 @@
+data "aws_partition" "current" {
+  count = var.create && var.partition == "" ? 1 : 0
+}
+data "aws_caller_identity" "current" {
+  count = var.create && var.account_id == "" ? 1 : 0
+}
+
+locals {
+  partition  = try(data.aws_partition.current[0].partition, var.partition)
+  account_id = try(data.aws_caller_identity.current[0].account_id, var.account_id)
+}
+
+################################################################################
+# AMI SSM Parameter
+################################################################################
+
+locals {
+  # Just to ensure templating doesn't fail when values are not provided
+  ssm_kubernetes_version = var.kubernetes_version != null ? var.kubernetes_version : ""
+
+  # Map the AMI type to the respective SSM param path
+  ami_type_to_ssm_param = {
+    AL2_x86_64                 = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2/recommended/image_id"
+    AL2_x86_64_GPU             = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2-gpu/recommended/image_id"
+    AL2_ARM_64                 = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2-arm64/recommended/image_id"
+    BOTTLEROCKET_ARM_64        = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}/arm64/latest/image_id"
+    BOTTLEROCKET_x86_64        = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}/x86_64/latest/image_id"
+    BOTTLEROCKET_ARM_64_FIPS   = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-fips/arm64/latest/image_id"
+    BOTTLEROCKET_x86_64_FIPS   = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-fips/x86_64/latest/image_id"
+    BOTTLEROCKET_ARM_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-nvidia/arm64/latest/image_id"
+    BOTTLEROCKET_x86_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_kubernetes_version}-nvidia/x86_64/latest/image_id"
+    WINDOWS_CORE_2019_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-${local.ssm_kubernetes_version}/image_id"
+    WINDOWS_FULL_2019_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Full-EKS_Optimized-${local.ssm_kubernetes_version}/image_id"
+    WINDOWS_CORE_2022_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Core-EKS_Optimized-${local.ssm_kubernetes_version}/image_id"
+    WINDOWS_FULL_2022_x86_64   = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Full-EKS_Optimized-${local.ssm_kubernetes_version}/image_id"
+    AL2023_x86_64_STANDARD     = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/standard/recommended/image_id"
+    AL2023_ARM_64_STANDARD     = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/arm64/standard/recommended/image_id"
+    AL2023_x86_64_NEURON       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/neuron/recommended/image_id"
+    AL2023_x86_64_NVIDIA       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/x86_64/nvidia/recommended/image_id"
+    AL2023_ARM_64_NVIDIA       = "/aws/service/eks/optimized-ami/${local.ssm_kubernetes_version}/amazon-linux-2023/arm64/nvidia/recommended/image_id"
+  }
+}
+
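+# Look up the AMI ID from the SSM parameter path that corresponds to the selected AMI type and Kubernetes version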
+data "aws_ssm_parameter" "ami" {
+  count = var.create ? 1 : 0
+
+  region = var.region
+
+  name = local.ami_type_to_ssm_param[var.ami_type]
+}
+
+################################################################################
+# User Data
+################################################################################
+
+module "user_data" {
+  source = "../_user_data"
+
+  create                    = var.create
+  ami_type                  = var.ami_type
+  is_eks_managed_node_group = false
+
+  cluster_name               = var.cluster_name
+  cluster_endpoint           = var.cluster_endpoint
+  cluster_auth_base64        = var.cluster_auth_base64
+  cluster_ip_family          = var.cluster_ip_family
+  cluster_service_cidr       = var.cluster_service_cidr
+  additional_cluster_dns_ips = var.additional_cluster_dns_ips
+
+  enable_bootstrap_user_data = true
+  pre_bootstrap_user_data    = var.pre_bootstrap_user_data
+  post_bootstrap_user_data   = var.post_bootstrap_user_data
+  bootstrap_extra_args       = var.bootstrap_extra_args
+  user_data_template_path    = var.user_data_template_path
+
+  cloudinit_pre_nodeadm  = var.cloudinit_pre_nodeadm
+  cloudinit_post_nodeadm = var.cloudinit_post_nodeadm
+}
+
+################################################################################
+# EFA Support
+################################################################################
+
+data "aws_ec2_instance_type" "this" {
+  count = var.create && var.enable_efa_support ? 1 : 0
+
+  region = var.region
+
+  instance_type = var.instance_type
+}
+
+locals {
+  enable_efa_support = var.create && var.enable_efa_support && local.instance_type_provided
+
+  instance_type_provided = var.instance_type != ""
+  num_network_cards      = try(data.aws_ec2_instance_type.this[0].maximum_network_cards, 0)
+
+  # Primary network interface must be EFA, remaining can be EFA or EFA-only
+  efa_network_interfaces = [
+    for i in range(local.num_network_cards) : {
+      associate_public_ip_address = false
+      delete_on_termination       = true
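+      # Primary interface (network card 0) uses device_index 0; interfaces on additional network cards use device_index 1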
+      device_index                = i == 0 ? 0 : 1
+      network_card_index          = i
+      interface_type              = var.enable_efa_only ? contains(concat([0], var.efa_indices), i) ? "efa" : "efa-only" : "efa"
+    }
+  ]
+
+  network_interfaces = local.enable_efa_support ? local.efa_network_interfaces : var.network_interfaces
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+locals {
+  launch_template_name = coalesce(var.launch_template_name, "${var.name}-node-group")
+  security_group_ids   = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids))
+}
+
+resource "aws_launch_template" "this" {
+  count = var.create && var.create_launch_template ? 1 : 0
+
+  region = var.region
+
+  dynamic "block_device_mappings" {
+    for_each = var.block_device_mappings != null ? var.block_device_mappings : {}
+
+    content {
+      device_name = block_device_mappings.value.device_name
+
+      dynamic "ebs" {
+        for_each = block_device_mappings.value.ebs != null ? [block_device_mappings.value.ebs] : []
+
+        content {
+          delete_on_termination      = ebs.value.delete_on_termination
+          encrypted                  = ebs.value.encrypted
+          iops                       = ebs.value.iops
+          kms_key_id                 = ebs.value.kms_key_id
+          snapshot_id                = ebs.value.snapshot_id
+          throughput                 = ebs.value.throughput
+          volume_initialization_rate = ebs.value.volume_initialization_rate
+          volume_size                = ebs.value.volume_size
+          volume_type                = ebs.value.volume_type
+        }
+      }
+
+      no_device    = block_device_mappings.value.no_device
+      virtual_name = block_device_mappings.value.virtual_name
+    }
+  }
+
+  dynamic "capacity_reservation_specification" {
+    for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
+
+    content {
+      capacity_reservation_preference = capacity_reservation_specification.value.capacity_reservation_preference
+
+      dynamic "capacity_reservation_target" {
+        for_each = capacity_reservation_specification.value.capacity_reservation_target != null ? [capacity_reservation_specification.value.capacity_reservation_target] : []
+        content {
+          capacity_reservation_id                 = capacity_reservation_target.value.capacity_reservation_id
+          capacity_reservation_resource_group_arn = capacity_reservation_target.value.capacity_reservation_resource_group_arn
+        }
+      }
+    }
+  }
+
+  dynamic "cpu_options" {
+    for_each = var.cpu_options != null ? [var.cpu_options] : []
+
+    content {
+      amd_sev_snp      = cpu_options.value.amd_sev_snp
+      core_count       = cpu_options.value.core_count
+      threads_per_core = cpu_options.value.threads_per_core
+    }
+  }
+
+  dynamic "credit_specification" {
+    for_each = var.credit_specification != null ? [var.credit_specification] : []
+
+    content {
+      cpu_credits = credit_specification.value.cpu_credits
+    }
+  }
+
+  default_version         = var.launch_template_default_version
+  description             = var.launch_template_description
+  disable_api_termination = var.disable_api_termination
+  ebs_optimized           = var.ebs_optimized
+
+  dynamic "enclave_options" {
+    for_each = var.enclave_options != null ? [var.enclave_options] : []
+
+    content {
+      enabled = enclave_options.value.enabled
+    }
+  }
+
+  iam_instance_profile {
+    arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn
+  }
+
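+  # Resolve the AMI from the SSM parameter lookup when `ami_id` is not provided
+  # (coalesce skips the empty-string default)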
+  image_id                             = coalesce(var.ami_id, nonsensitive(data.aws_ssm_parameter.ami[0].value))
+  instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
+
+  dynamic "instance_market_options" {
+    for_each = var.instance_market_options != null ? [var.instance_market_options] : []
+
+    content {
+      market_type = instance_market_options.value.market_type
+
+      dynamic "spot_options" {
+        for_each = instance_market_options.value.spot_options != null ? [instance_market_options.value.spot_options] : []
+
+        content {
+          block_duration_minutes         = spot_options.value.block_duration_minutes
+          instance_interruption_behavior = spot_options.value.instance_interruption_behavior
+          max_price                      = spot_options.value.max_price
+          spot_instance_type             = spot_options.value.spot_instance_type
+          valid_until                    = spot_options.value.valid_until
+        }
+      }
+    }
+  }
+
+  dynamic "instance_requirements" {
+    for_each = var.instance_requirements != null ? [var.instance_requirements] : []
+
+    content {
+      dynamic "accelerator_count" {
+        for_each = instance_requirements.value.accelerator_count != null ? [instance_requirements.value.accelerator_count] : []
+
+        content {
+          max = accelerator_count.value.max
+          min = accelerator_count.value.min
+        }
+      }
+
+      accelerator_manufacturers = instance_requirements.value.accelerator_manufacturers
+      accelerator_names         = instance_requirements.value.accelerator_names
+
+      dynamic "accelerator_total_memory_mib" {
+        for_each = instance_requirements.value.accelerator_total_memory_mib != null ? [instance_requirements.value.accelerator_total_memory_mib] : []
+
+        content {
+          max = accelerator_total_memory_mib.value.max
+          min = accelerator_total_memory_mib.value.min
+        }
+      }
+
+      accelerator_types      = instance_requirements.value.accelerator_types
+      allowed_instance_types = instance_requirements.value.allowed_instance_types
+      bare_metal             = instance_requirements.value.bare_metal
+
+      dynamic "baseline_ebs_bandwidth_mbps" {
+        for_each = instance_requirements.value.baseline_ebs_bandwidth_mbps != null ? [instance_requirements.value.baseline_ebs_bandwidth_mbps] : []
+
+        content {
+          max = baseline_ebs_bandwidth_mbps.value.max
+          min = baseline_ebs_bandwidth_mbps.value.min
+        }
+      }
+
+      burstable_performance                                   = instance_requirements.value.burstable_performance
+      cpu_manufacturers                                       = instance_requirements.value.cpu_manufacturers
+      excluded_instance_types                                 = instance_requirements.value.excluded_instance_types
+      instance_generations                                    = instance_requirements.value.instance_generations
+      local_storage                                           = instance_requirements.value.local_storage
+      local_storage_types                                     = instance_requirements.value.local_storage_types
+      max_spot_price_as_percentage_of_optimal_on_demand_price = instance_requirements.value.max_spot_price_as_percentage_of_optimal_on_demand_price
+
+      dynamic "memory_gib_per_vcpu" {
+        for_each = instance_requirements.value.memory_gib_per_vcpu != null ? [instance_requirements.value.memory_gib_per_vcpu] : []
+
+        content {
+          max = memory_gib_per_vcpu.value.max
+          min = memory_gib_per_vcpu.value.min
+        }
+      }
+
+      dynamic "memory_mib" {
+        for_each = instance_requirements.value.memory_mib != null ? [instance_requirements.value.memory_mib] : []
+
+        content {
+          max = memory_mib.value.max
+          min = memory_mib.value.min
+        }
+      }
+
+      dynamic "network_interface_count" {
+        for_each = instance_requirements.value.network_interface_count != null ? [instance_requirements.value.network_interface_count] : []
+
+        content {
+          max = network_interface_count.value.max
+          min = network_interface_count.value.min
+        }
+      }
+
+      on_demand_max_price_percentage_over_lowest_price = instance_requirements.value.on_demand_max_price_percentage_over_lowest_price
+      require_hibernate_support                        = instance_requirements.value.require_hibernate_support
+      spot_max_price_percentage_over_lowest_price      = instance_requirements.value.spot_max_price_percentage_over_lowest_price
+
+      dynamic "total_local_storage_gb" {
+        for_each = instance_requirements.value.total_local_storage_gb != null ? [instance_requirements.value.total_local_storage_gb] : []
+
+        content {
+          max = total_local_storage_gb.value.max
+          min = total_local_storage_gb.value.min
+        }
+      }
+
+      dynamic "vcpu_count" {
+        for_each = instance_requirements.value.vcpu_count != null ? [instance_requirements.value.vcpu_count] : []
+
+        content {
+          max = vcpu_count.value.max
+          min = vcpu_count.value.min
+        }
+      }
+    }
+  }
+
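+  # `instance_type` and `instance_requirements` are mutually exclusive on a launch template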
+  instance_type = var.instance_requirements != null ? null : var.instance_type
+  kernel_id     = var.kernel_id
+  key_name      = var.key_name
+
+  dynamic "license_specification" {
+    for_each = var.license_specifications != null ? var.license_specifications : []
+
+    content {
+      license_configuration_arn = license_specification.value.license_configuration_arn
+    }
+  }
+
+  dynamic "maintenance_options" {
+    for_each = var.maintenance_options != null ? [var.maintenance_options] : []
+
+    content {
+      auto_recovery = maintenance_options.value.auto_recovery
+    }
+  }
+
+  dynamic "metadata_options" {
+    for_each = var.metadata_options != null ? [var.metadata_options] : []
+
+    content {
+      http_endpoint               = metadata_options.value.http_endpoint
+      http_protocol_ipv6          = metadata_options.value.http_protocol_ipv6
+      http_put_response_hop_limit = metadata_options.value.http_put_response_hop_limit
+      http_tokens                 = metadata_options.value.http_tokens
+      instance_metadata_tags      = metadata_options.value.instance_metadata_tags
+    }
+  }
+
+  dynamic "monitoring" {
+    for_each = var.enable_monitoring ? [1] : []
+
+    content {
+      enabled = var.enable_monitoring
+    }
+  }
+
+  name        = var.launch_template_use_name_prefix ? null : local.launch_template_name
+  name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null
+
+  dynamic "network_interfaces" {
+    for_each = length(local.network_interfaces) > 0 ? local.network_interfaces : []
+
+    content {
+      associate_carrier_ip_address = network_interfaces.value.associate_carrier_ip_address
+      associate_public_ip_address  = network_interfaces.value.associate_public_ip_address
+
+      dynamic "connection_tracking_specification" {
+        for_each = network_interfaces.value.connection_tracking_specification != null ? [network_interfaces.value.connection_tracking_specification] : []
+
+        content {
+          tcp_established_timeout = connection_tracking_specification.value.tcp_established_timeout
+          udp_stream_timeout      = connection_tracking_specification.value.udp_stream_timeout
+          udp_timeout             = connection_tracking_specification.value.udp_timeout
+        }
+      }
+
+      delete_on_termination = network_interfaces.value.delete_on_termination
+      description           = network_interfaces.value.description
+      device_index          = network_interfaces.value.device_index
+
+      dynamic "ena_srd_specification" {
+        for_each = network_interfaces.value.ena_srd_specification != null ? [network_interfaces.value.ena_srd_specification] : []
+
+        content {
+          ena_srd_enabled = ena_srd_specification.value.ena_srd_enabled
+
+          dynamic "ena_srd_udp_specification" {
+            for_each = ena_srd_specification.value.ena_srd_udp_specification != null ? [ena_srd_specification.value.ena_srd_udp_specification] : []
+
+            content {
+              ena_srd_udp_enabled = ena_srd_udp_specification.value.ena_srd_udp_enabled
+            }
+          }
+        }
+      }
+
+      interface_type       = network_interfaces.value.interface_type
+      ipv4_address_count   = network_interfaces.value.ipv4_address_count
+      ipv4_addresses       = network_interfaces.value.ipv4_addresses
+      ipv4_prefix_count    = network_interfaces.value.ipv4_prefix_count
+      ipv4_prefixes        = network_interfaces.value.ipv4_prefixes
+      ipv6_address_count   = network_interfaces.value.ipv6_address_count
+      ipv6_addresses       = network_interfaces.value.ipv6_addresses
+      ipv6_prefix_count    = network_interfaces.value.ipv6_prefix_count
+      ipv6_prefixes        = network_interfaces.value.ipv6_prefixes
+      network_card_index   = network_interfaces.value.network_card_index
+      network_interface_id = network_interfaces.value.network_interface_id
+      primary_ipv6         = network_interfaces.value.primary_ipv6
+      private_ip_address   = network_interfaces.value.private_ip_address
+      # Ref: https://github.com/hashicorp/terraform-provider-aws/issues/4570
+      security_groups = compact(concat(network_interfaces.value.security_groups, var.vpc_security_group_ids))
+      # Set on EKS managed node group, will fail if set here
+      # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics
+      # subnet_id       = try(network_interfaces.value.subnet_id, null)
+    }
+  }
+
+  dynamic "placement" {
+    for_each = var.placement != null || local.create_placement_group ? [var.placement] : []
+
+    content {
+      affinity                = try(placement.value.affinity, null)
+      availability_zone       = try(placement.value.availability_zone, null)
+      group_name              = try(aws_placement_group.this[0].name, placement.value.group_name)
+      host_id                 = try(placement.value.host_id, null)
+      host_resource_group_arn = try(placement.value.host_resource_group_arn, null)
+      partition_number        = try(placement.value.partition_number, null)
+      spread_domain           = try(placement.value.spread_domain, null)
+      tenancy                 = try(placement.value.tenancy, null)
+    }
+  }
+
+  dynamic "private_dns_name_options" {
+    for_each = var.private_dns_name_options != null ? [var.private_dns_name_options] : []
+
+    content {
+      enable_resource_name_dns_aaaa_record = private_dns_name_options.value.enable_resource_name_dns_aaaa_record
+      enable_resource_name_dns_a_record    = private_dns_name_options.value.enable_resource_name_dns_a_record
+      hostname_type                        = private_dns_name_options.value.hostname_type
+    }
+  }
+
+  ram_disk_id = var.ram_disk_id
+
+  dynamic "tag_specifications" {
+    for_each = toset(var.tag_specifications)
+
+    content {
+      resource_type = tag_specifications.key
+      tags          = merge(var.tags, { Name = var.name }, var.launch_template_tags)
+    }
+  }
+
+  update_default_version = var.update_launch_template_default_version
+  user_data              = module.user_data.user_data
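+  # Security groups are supplied on the network interfaces when interfaces are defined,
+  # since a launch template cannot set both instance-level and interface-level security groups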
+  vpc_security_group_ids = length(local.network_interfaces) > 0 ? [] : local.security_group_ids
+
+  tags = var.tags
+
+  # Prevent premature access of policies by pods that
+  # require permissions on create/destroy that depend on nodes
+  depends_on = [
+    aws_iam_role_policy_attachment.this,
+    aws_iam_role_policy_attachment.additional,
+  ]
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+################################################################################
+# Node Group
+################################################################################
+
+locals {
+  launch_template_id = var.create && var.create_launch_template ? aws_launch_template.this[0].id : var.launch_template_id
+  # Change order to allow users to set version priority before using defaults
+  launch_template_version = coalesce(var.launch_template_version, try(aws_launch_template.this[0].default_version, "$Default"))
+}
+
+resource "aws_autoscaling_group" "this" {
+  count = var.create && var.create_autoscaling_group ? 1 : 0
+
+  region = var.region
+
+  availability_zones        = var.availability_zones
+  capacity_rebalance        = var.capacity_rebalance
+  context                   = var.context
+  default_instance_warmup   = var.default_instance_warmup
+  desired_capacity          = var.desired_size
+  desired_capacity_type     = var.desired_size_type
+  enabled_metrics           = var.enabled_metrics
+  force_delete              = var.force_delete
+  health_check_grace_period = var.health_check_grace_period
+  health_check_type         = var.health_check_type
+
+  dynamic "initial_lifecycle_hook" {
+    for_each = var.initial_lifecycle_hooks != null ? var.initial_lifecycle_hooks : []
+
+    content {
+      default_result          = initial_lifecycle_hook.value.default_result
+      heartbeat_timeout       = initial_lifecycle_hook.value.heartbeat_timeout
+      lifecycle_transition    = initial_lifecycle_hook.value.lifecycle_transition
+      name                    = initial_lifecycle_hook.value.name
+      notification_metadata   = initial_lifecycle_hook.value.notification_metadata
+      notification_target_arn = initial_lifecycle_hook.value.notification_target_arn
+      role_arn                = initial_lifecycle_hook.value.role_arn
+    }
+  }
+
+  dynamic "instance_maintenance_policy" {
+    for_each = var.instance_maintenance_policy != null ? [var.instance_maintenance_policy] : []
+
+    content {
+      min_healthy_percentage = instance_maintenance_policy.value.min_healthy_percentage
+      max_healthy_percentage = instance_maintenance_policy.value.max_healthy_percentage
+    }
+  }
+
+  dynamic "instance_refresh" {
+    for_each = var.instance_refresh != null ? [var.instance_refresh] : []
+
+    content {
+      dynamic "preferences" {
+        for_each = instance_refresh.value.preferences != null ? [instance_refresh.value.preferences] : []
+
+        content {
+          dynamic "alarm_specification" {
+            for_each = preferences.value.alarm_specification != null ? [preferences.value.alarm_specification] : []
+
+            content {
+              alarms = alarm_specification.value.alarms
+            }
+          }
+
+          auto_rollback                = preferences.value.auto_rollback
+          checkpoint_delay             = preferences.value.checkpoint_delay
+          checkpoint_percentages       = preferences.value.checkpoint_percentages
+          instance_warmup              = preferences.value.instance_warmup
+          max_healthy_percentage       = preferences.value.max_healthy_percentage
+          min_healthy_percentage       = preferences.value.min_healthy_percentage
+          scale_in_protected_instances = preferences.value.scale_in_protected_instances
+          skip_matching                = preferences.value.skip_matching
+          standby_instances            = preferences.value.standby_instances
+        }
+      }
+
+      strategy = instance_refresh.value.strategy
+      triggers = instance_refresh.value.triggers
+    }
+  }
+
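+  # When a mixed instances policy is used, the launch template is supplied through
+  # `mixed_instances_policy.launch_template` below instead of here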
+  dynamic "launch_template" {
+    for_each = var.use_mixed_instances_policy ? [] : [1]
+
+    content {
+      id      = local.launch_template_id
+      version = local.launch_template_version
+    }
+  }
+
+  max_instance_lifetime = var.max_instance_lifetime
+  max_size              = var.max_size
+  metrics_granularity   = var.metrics_granularity
+  min_size              = var.min_size
+
+  ignore_failed_scaling_activities = var.ignore_failed_scaling_activities
+
+  dynamic "mixed_instances_policy" {
+    for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : []
+
+    content {
+      dynamic "instances_distribution" {
+        for_each = mixed_instances_policy.value.instances_distribution != null ? [mixed_instances_policy.value.instances_distribution] : []
+
+        content {
+          on_demand_allocation_strategy            = instances_distribution.value.on_demand_allocation_strategy
+          on_demand_base_capacity                  = instances_distribution.value.on_demand_base_capacity
+          on_demand_percentage_above_base_capacity = instances_distribution.value.on_demand_percentage_above_base_capacity
+          spot_allocation_strategy                 = instances_distribution.value.spot_allocation_strategy
+          spot_instance_pools                      = instances_distribution.value.spot_instance_pools
+          spot_max_price                           = instances_distribution.value.spot_max_price
+        }
+      }
+
+      dynamic "launch_template" {
+        for_each = [mixed_instances_policy.value.launch_template]
+
+        content {
+          launch_template_specification {
+            launch_template_id = local.launch_template_id
+            version            = local.launch_template_version
+          }
+
+          dynamic "override" {
+            for_each = launch_template.value.override != null ? launch_template.value.override : []
+
+            content {
+              dynamic "instance_requirements" {
+                for_each = override.value.instance_requirements != null ? [override.value.instance_requirements] : []
+
+                content {
+                  dynamic "accelerator_count" {
+                    for_each = instance_requirements.value.accelerator_count != null ? [instance_requirements.value.accelerator_count] : []
+
+                    content {
+                      max = accelerator_count.value.max
+                      min = accelerator_count.value.min
+                    }
+                  }
+
+                  accelerator_manufacturers = instance_requirements.value.accelerator_manufacturers
+                  accelerator_names         = instance_requirements.value.accelerator_names
+
+                  dynamic "accelerator_total_memory_mib" {
+                    for_each = instance_requirements.value.accelerator_total_memory_mib != null ? [instance_requirements.value.accelerator_total_memory_mib] : []
+
+                    content {
+                      max = accelerator_total_memory_mib.value.max
+                      min = accelerator_total_memory_mib.value.min
+                    }
+                  }
+
+                  accelerator_types      = instance_requirements.value.accelerator_types
+                  allowed_instance_types = instance_requirements.value.allowed_instance_types
+                  bare_metal             = instance_requirements.value.bare_metal
+
+                  dynamic "baseline_ebs_bandwidth_mbps" {
+                    for_each = instance_requirements.value.baseline_ebs_bandwidth_mbps != null ? [instance_requirements.value.baseline_ebs_bandwidth_mbps] : []
+
+                    content {
+                      max = baseline_ebs_bandwidth_mbps.value.max
+                      min = baseline_ebs_bandwidth_mbps.value.min
+                    }
+                  }
+
+                  burstable_performance                                   = instance_requirements.value.burstable_performance
+                  cpu_manufacturers                                       = instance_requirements.value.cpu_manufacturers
+                  excluded_instance_types                                 = instance_requirements.value.excluded_instance_types
+                  instance_generations                                    = instance_requirements.value.instance_generations
+                  local_storage                                           = instance_requirements.value.local_storage
+                  local_storage_types                                     = instance_requirements.value.local_storage_types
+                  max_spot_price_as_percentage_of_optimal_on_demand_price = instance_requirements.value.max_spot_price_as_percentage_of_optimal_on_demand_price
+
+                  dynamic "memory_gib_per_vcpu" {
+                    for_each = instance_requirements.value.memory_gib_per_vcpu != null ? [instance_requirements.value.memory_gib_per_vcpu] : []
+
+                    content {
+                      max = memory_gib_per_vcpu.value.max
+                      min = memory_gib_per_vcpu.value.min
+                    }
+                  }
+
+                  dynamic "memory_mib" {
+                    for_each = instance_requirements.value.memory_mib != null ? [instance_requirements.value.memory_mib] : []
+
+                    content {
+                      max = memory_mib.value.max
+                      min = memory_mib.value.min
+                    }
+                  }
+
+                  dynamic "network_bandwidth_gbps" {
+                    for_each = instance_requirements.value.network_bandwidth_gbps != null ? [instance_requirements.value.network_bandwidth_gbps] : []
+
+                    content {
+                      max = network_bandwidth_gbps.value.max
+                      min = network_bandwidth_gbps.value.min
+                    }
+                  }
+
+                  dynamic "network_interface_count" {
+                    for_each = instance_requirements.value.network_interface_count != null ? [instance_requirements.value.network_interface_count] : []
+
+                    content {
+                      max = network_interface_count.value.max
+                      min = network_interface_count.value.min
+                    }
+                  }
+
+                  on_demand_max_price_percentage_over_lowest_price = instance_requirements.value.on_demand_max_price_percentage_over_lowest_price
+                  require_hibernate_support                        = instance_requirements.value.require_hibernate_support
+                  spot_max_price_percentage_over_lowest_price      = instance_requirements.value.spot_max_price_percentage_over_lowest_price
+
+                  dynamic "total_local_storage_gb" {
+                    for_each = instance_requirements.value.total_local_storage_gb != null ? [instance_requirements.value.total_local_storage_gb] : []
+
+                    content {
+                      max = total_local_storage_gb.value.max
+                      min = total_local_storage_gb.value.min
+                    }
+                  }
+
+                  dynamic "vcpu_count" {
+                    for_each = instance_requirements.value.vcpu_count != null ? [instance_requirements.value.vcpu_count] : []
+
+                    content {
+                      max = vcpu_count.value.max
+                      min = vcpu_count.value.min
+                    }
+                  }
+                }
+              }
+
+              instance_type = override.value.instance_type
+
+              dynamic "launch_template_specification" {
+                for_each = override.value.launch_template_specification != null ? [override.value.launch_template_specification] : []
+
+                content {
+                  launch_template_id   = launch_template_specification.value.launch_template_id
+                  launch_template_name = launch_template_specification.value.launch_template_name
+                  version              = launch_template_specification.value.version
+                }
+              }
+
+              weighted_capacity = override.value.weighted_capacity
+            }
+          }
+        }
+      }
+    }
+  }
+
+  name                  = var.use_name_prefix ? null : var.name
+  name_prefix           = var.use_name_prefix ? "${var.name}-" : null
+  placement_group       = var.placement_group
+  protect_from_scale_in = var.protect_from_scale_in
+  suspended_processes   = var.suspended_processes
+
+  dynamic "tag" {
+    for_each = merge(
+      {
+        "Name"                                      = var.name
+        "kubernetes.io/cluster/${var.cluster_name}" = "owned"
+        "k8s.io/cluster/${var.cluster_name}"        = "owned"
+      },
+      var.tags
+    )
+
+    content {
+      key                 = tag.key
+      value               = tag.value
+      propagate_at_launch = true
+    }
+  }
+
+  dynamic "tag" {
+    for_each = var.autoscaling_group_tags
+
+    content {
+      key                 = tag.key
+      value               = tag.value
+      propagate_at_launch = false
+    }
+  }
+
+  termination_policies = var.termination_policies
+  vpc_zone_identifier  = var.subnet_ids
+
+  dynamic "timeouts" {
+    for_each = var.timeouts != null ? [var.timeouts] : []
+
+    content {
+      delete = var.timeouts.delete
+    }
+  }
+
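+  # `desired_capacity` is ignored so that external changes (e.g. by the cluster autoscaler)
+  # are not reverted on subsequent applies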
+  lifecycle {
+    create_before_destroy = true
+    ignore_changes = [
+      desired_capacity
+    ]
+  }
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+locals {
+  create_iam_instance_profile = var.create && var.create_iam_instance_profile
+
+  iam_role_name          = coalesce(var.iam_role_name, "${var.name}-node-group")
+  iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
+
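+  # The IPv4 CNI policy is an AWS managed policy, while the IPv6 CNI policy is an
+  # account-scoped (customer managed) policy that must already exist in the account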
+  ipv4_cni_policy = { for k, v in {
+    AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" }
+  ipv6_cni_policy = { for k, v in {
+    AmazonEKS_CNI_IPv6_Policy = "arn:${local.partition}:iam::${local.account_id}:policy/AmazonEKS_CNI_IPv6_Policy"
+  } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" }
+}
+
+data "aws_iam_policy_document" "assume_role_policy" {
+  count = local.create_iam_instance_profile ? 1 : 0
+
+  statement {
+    sid     = "EKSNodeAssumeRole"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+}
+
+resource "aws_iam_role" "this" {
+  count = local.create_iam_instance_profile ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  path        = var.iam_role_path
+  description = var.iam_role_description
+
+  assume_role_policy    = data.aws_iam_policy_document.assume_role_policy[0].json
+  permissions_boundary  = var.iam_role_permissions_boundary
+  force_detach_policies = true
+
+  tags = merge(var.tags, var.iam_role_tags)
+}
+
+# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
+resource "aws_iam_role_policy_attachment" "this" {
+  for_each = { for k, v in merge(
+    {
+      AmazonEKSWorkerNodePolicy          = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy"
+      AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly"
+    },
+    local.ipv4_cni_policy,
+    local.ipv6_cni_policy
+  ) : k => v if local.create_iam_instance_profile }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_role_policy_attachment" "additional" {
+  for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_instance_profile }
+
+  policy_arn = each.value
+  role       = aws_iam_role.this[0].name
+}
+
+resource "aws_iam_instance_profile" "this" {
+  count = local.create_iam_instance_profile ? 1 : 0
+
+  role = aws_iam_role.this[0].name
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  path        = var.iam_role_path
+
+  tags = merge(var.tags, var.iam_role_tags)
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+locals {
+  create_iam_role_policy = local.create_iam_instance_profile && var.create_iam_role_policy && var.iam_role_policy_statements != null
+}
+
+data "aws_iam_policy_document" "role" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  dynamic "statement" {
+    for_each = var.iam_role_policy_statements != null ? var.iam_role_policy_statements : []
+
+    content {
+      sid           = statement.value.sid
+      actions       = statement.value.actions
+      not_actions   = statement.value.not_actions
+      effect        = statement.value.effect
+      resources     = statement.value.resources
+      not_resources = statement.value.not_resources
+
+      dynamic "principals" {
+        for_each = statement.value.principals != null ? statement.value.principals : []
+
+        content {
+          type        = principals.value.type
+          identifiers = principals.value.identifiers
+        }
+      }
+
+      dynamic "not_principals" {
+        for_each = statement.value.not_principals != null ? statement.value.not_principals : []
+
+        content {
+          type        = not_principals.value.type
+          identifiers = not_principals.value.identifiers
+        }
+      }
+
+      dynamic "condition" {
+        for_each = statement.value.condition != null ? statement.value.condition : []
+
+        content {
+          test     = condition.value.test
+          values   = condition.value.values
+          variable = condition.value.variable
+        }
+      }
+    }
+  }
+}
+
+resource "aws_iam_role_policy" "this" {
+  count = local.create_iam_role_policy ? 1 : 0
+
+  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
+  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
+  policy      = data.aws_iam_policy_document.role[0].json
+  role        = aws_iam_role.this[0].id
+}
+
+################################################################################
+# Placement Group
+################################################################################
+
+locals {
+  create_placement_group = var.create && (local.enable_efa_support || var.create_placement_group)
+}
+
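+# Created automatically when EFA support is enabled; the `cluster` strategy packs instances
+# onto the same low-latency network segment, which EFA workloads generally require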
+resource "aws_placement_group" "this" {
+  count = local.create_placement_group ? 1 : 0
+
+  region = var.region
+
+  name     = "${var.cluster_name}-${var.name}"
+  strategy = "cluster"
+
+  tags = var.tags
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
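+# Registers the node IAM role with the cluster so nodes can join when access entries are
+# used for authentication; Windows AMIs require the `EC2_WINDOWS` entry type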
+resource "aws_eks_access_entry" "this" {
+  count = var.create && var.create_access_entry ? 1 : 0
+
+  region = var.region
+
+  cluster_name  = var.cluster_name
+  principal_arn = var.create_iam_instance_profile ? aws_iam_role.this[0].arn : var.iam_role_arn
+  type          = startswith(var.ami_type, "WINDOWS_") ? "EC2_WINDOWS" : "EC2_LINUX"
+
+  tags = var.tags
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+locals {
+  create_security_group = var.create && var.create_security_group && length(merge(local.security_group_ingress_rules, local.security_group_egress_rules)) > 0
+  security_group_name   = coalesce(var.security_group_name, "${var.cluster_name}-${var.name}")
+
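+  # EFA requires a security group that allows all traffic to and from itself, hence the
+  # self-referencing `all_self_efa` ingress/egress rules when EFA support is enabled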
+  security_group_ingress_rules = merge({ for k, v in
+    {
+      all_self_efa = {
+        description = "Node to node EFA"
+        ip_protocol = "-1"
+        from_port   = 0
+        self        = true
+      }
+    } : k => v if var.enable_efa_support
+    },
+    var.security_group_ingress_rules
+  )
+  security_group_egress_rules = merge({ for k, v in
+    {
+      all_self_efa = {
+        description = "Node to node EFA"
+        ip_protocol = "-1"
+        to_port     = 0
+        self        = true
+      }
+    } : k => v if var.enable_efa_support
+    },
+    var.security_group_egress_rules
+  )
+}
+
+data "aws_subnet" "this" {
+  count = local.create_security_group ? 1 : 0
+
+  region = var.region
+
+  id = element(var.subnet_ids, 0)
+}
+
+resource "aws_security_group" "this" {
+  count = local.create_security_group ? 1 : 0
+
+  region = var.region
+
+  name        = var.security_group_use_name_prefix ? null : local.security_group_name
+  name_prefix = var.security_group_use_name_prefix ? "${local.security_group_name}-" : null
+  description = var.security_group_description
+  vpc_id      = data.aws_subnet.this[0].vpc_id
+
+  tags = merge(
+    var.tags,
+    { "Name" = local.security_group_name },
+    var.security_group_tags
+  )
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_vpc_security_group_ingress_rule" "this" {
+  for_each = { for k, v in local.security_group_ingress_rules : k => v if length(local.security_group_ingress_rules) > 0 && local.create_security_group }
+
+  region = var.region
+
+  cidr_ipv4                    = each.value.cidr_ipv4
+  cidr_ipv6                    = each.value.cidr_ipv6
+  description                  = each.value.description
+  from_port                    = each.value.from_port
+  ip_protocol                  = each.value.ip_protocol
+  prefix_list_id               = each.value.prefix_list_id
+  referenced_security_group_id = each.value.self ? aws_security_group.this[0].id : each.value.referenced_security_group_id
+  security_group_id            = aws_security_group.this[0].id
+  tags = merge(
+    var.tags,
+    var.security_group_tags,
+    { "Name" = coalesce(each.value.name, "${local.security_group_name}-${each.key}") },
+    each.value.tags
+  )
+  to_port = try(coalesce(each.value.to_port, each.value.from_port), null)
+}
+
+resource "aws_vpc_security_group_egress_rule" "this" {
+  for_each = { for k, v in local.security_group_egress_rules : k => v if length(local.security_group_egress_rules) > 0 && local.create_security_group }
+
+  region = var.region
+
+  cidr_ipv4                    = each.value.cidr_ipv4
+  cidr_ipv6                    = each.value.cidr_ipv6
+  description                  = each.value.description
+  from_port                    = try(coalesce(each.value.from_port, each.value.to_port), null)
+  ip_protocol                  = each.value.ip_protocol
+  prefix_list_id               = each.value.prefix_list_id
+  referenced_security_group_id = each.value.self ? aws_security_group.this[0].id : each.value.referenced_security_group_id
+  security_group_id            = aws_security_group.this[0].id
+  tags = merge(
+    var.tags,
+    var.security_group_tags,
+    { "Name" = coalesce(each.value.name, "${local.security_group_name}-${each.key}") },
+    each.value.tags
+  )
+  to_port = each.value.to_port
+}
diff --git a/modules/self-managed-node-group/migrations.tf b/modules/self-managed-node-group/migrations.tf
new file mode 100644
index 0000000000..5d51a7208a
--- /dev/null
+++ b/modules/self-managed-node-group/migrations.tf
@@ -0,0 +1,20 @@
+################################################################################
+# Migrations: v20.7 -> v20.8
+################################################################################
+
+# Node IAM role policy attachment
+# Commercial partition only - `moved` does not allow multiple moves to the same target
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]
+  to   = aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]
+}
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..ad8710b890
--- /dev/null
+++ b/modules/self-managed-node-group/outputs.tf
@@ -0,0 +1,157 @@
+################################################################################
+# Launch template
+################################################################################
+
+output "launch_template_id" {
+  description = "The ID of the launch template"
+  value       = try(aws_launch_template.this[0].id, null)
+}
+
+output "launch_template_arn" {
+  description = "The ARN of the launch template"
+  value       = try(aws_launch_template.this[0].arn, null)
+}
+
+output "launch_template_latest_version" {
+  description = "The latest version of the launch template"
+  value       = try(aws_launch_template.this[0].latest_version, null)
+}
+
+output "launch_template_name" {
+  description = "The name of the launch template"
+  value       = try(aws_launch_template.this[0].name, null)
+}
+
+################################################################################
+# Autoscaling group
+################################################################################
+
+output "autoscaling_group_arn" {
+  description = "The ARN for this autoscaling group"
+  value       = try(aws_autoscaling_group.this[0].arn, null)
+}
+
+output "autoscaling_group_id" {
+  description = "The autoscaling group id"
+  value       = try(aws_autoscaling_group.this[0].id, null)
+}
+
+output "autoscaling_group_name" {
+  description = "The autoscaling group name"
+  value       = try(aws_autoscaling_group.this[0].name, null)
+}
+
+output "autoscaling_group_min_size" {
+  description = "The minimum size of the autoscaling group"
+  value       = try(aws_autoscaling_group.this[0].min_size, null)
+}
+
+output "autoscaling_group_max_size" {
+  description = "The maximum size of the autoscaling group"
+  value       = try(aws_autoscaling_group.this[0].max_size, null)
+}
+
+output "autoscaling_group_desired_capacity" {
+  description = "The number of Amazon EC2 instances that should be running in the group"
+  value       = try(aws_autoscaling_group.this[0].desired_capacity, null)
+}
+
+output "autoscaling_group_default_cooldown" {
+  description = "Time between a scaling activity and the succeeding scaling activity"
+  value       = try(aws_autoscaling_group.this[0].default_cooldown, null)
+}
+
+output "autoscaling_group_health_check_grace_period" {
+  description = "Time after instance comes into service before checking health"
+  value       = try(aws_autoscaling_group.this[0].health_check_grace_period, null)
+}
+
+output "autoscaling_group_health_check_type" {
+  description = "EC2 or ELB. Controls how health checking is done"
+  value       = try(aws_autoscaling_group.this[0].health_check_type, null)
+}
+
+output "autoscaling_group_availability_zones" {
+  description = "The availability zones of the autoscaling group"
+  value       = try(aws_autoscaling_group.this[0].availability_zones, null)
+}
+
+output "autoscaling_group_vpc_zone_identifier" {
+  description = "The VPC zone identifier"
+  value       = try(aws_autoscaling_group.this[0].vpc_zone_identifier, null)
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "iam_role_name" {
+  description = "The name of the IAM role"
+  value       = try(aws_iam_role.this[0].name, null)
+}
+
+output "iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the IAM role"
+  value       = try(aws_iam_role.this[0].arn, null)
+}
+
+output "iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = try(aws_iam_role.this[0].unique_id, null)
+}
+
+################################################################################
+# IAM Instance Profile
+################################################################################
+
+output "iam_instance_profile_arn" {
+  description = "ARN assigned by AWS to the instance profile"
+  value       = try(aws_iam_instance_profile.this[0].arn, var.iam_instance_profile_arn)
+}
+
+output "iam_instance_profile_id" {
+  description = "Instance profile's ID"
+  value       = try(aws_iam_instance_profile.this[0].id, null)
+}
+
+output "iam_instance_profile_unique" {
+  description = "Stable and unique string identifying the IAM instance profile"
+  value       = try(aws_iam_instance_profile.this[0].unique_id, null)
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entry_arn" {
+  description = "Amazon Resource Name (ARN) of the Access Entry"
+  value       = try(aws_eks_access_entry.this[0].access_entry_arn, null)
+}
+
+################################################################################
+# Additional
+################################################################################
+
+output "image_id" {
+  description = "ID of the image"
+  value       = try(aws_launch_template.this[0].image_id, null)
+}
+
+output "user_data" {
+  description = "Base64 encoded user data"
+  value       = try(module.user_data.user_data, null)
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the security group"
+  value       = try(aws_security_group.this[0].arn, null)
+}
+
+output "security_group_id" {
+  description = "ID of the security group"
+  value       = try(aws_security_group.this[0].id, null)
+}
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
new file mode 100644
index 0000000000..e3ef63e053
--- /dev/null
+++ b/modules/self-managed-node-group/variables.tf
@@ -0,0 +1,1030 @@
+variable "create" {
+  description = "Determines whether to create self managed node group or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
+}
+
+variable "region" {
+  description = "Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration"
+  type        = string
+  default     = null
+}
+
+variable "partition" {
+  description = "The AWS partition - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+variable "account_id" {
+  description = "The AWS account ID - pass through value to reduce number of GET requests from data sources"
+  type        = string
+  default     = ""
+}
+
+################################################################################
+# User Data
+################################################################################
+
+variable "cluster_name" {
+  description = "Name of associated EKS cluster"
+  type        = string
+  default     = ""
+}
+
+variable "cluster_endpoint" {
+  description = "Endpoint of associated EKS cluster"
+  type        = string
+  default     = null
+}
+
+variable "cluster_auth_base64" {
+  description = "Base64 encoded CA of associated EKS cluster"
+  type        = string
+  default     = null
+}
+
+variable "cluster_service_cidr" {
+  description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself"
+  type        = string
+  default     = null
+}
+
+variable "cluster_ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`"
+  type        = string
+  default     = null
+}
+
+variable "additional_cluster_dns_ips" {
+  description = "Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = list(string)
+  default     = null
+}
+
+variable "pre_bootstrap_user_data" {
+  description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = null
+}
+
+variable "post_bootstrap_user_data" {
+  description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`"
+  type        = string
+  default     = null
+}
+
+variable "bootstrap_extra_args" {
+  description = "Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
+  type        = string
+  default     = null
+}
+
+variable "user_data_template_path" {
+  description = "Path to a local, custom user data template file to use when rendering user data"
+  type        = string
+  default     = null
+}
+
+variable "cloudinit_pre_nodeadm" {
+  description = "Array of cloud-init document parts that are created before the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default = null
+}
+
+variable "cloudinit_post_nodeadm" {
+  description = "Array of cloud-init document parts that are created after the nodeadm document part"
+  type = list(object({
+    content      = string
+    content_type = optional(string)
+    filename     = optional(string)
+    merge_type   = optional(string)
+  }))
+  default = null
+}
+
+################################################################################
+# Launch template
+################################################################################
+
+variable "create_launch_template" {
+  description = "Determines whether to create launch template or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "launch_template_id" {
+  description = "The ID of an existing launch template to use. Required when `create_launch_template` = `false`"
+  type        = string
+  default     = ""
+}
+
+variable "launch_template_name" {
+  description = "Name of launch template to be created"
+  type        = string
+  default     = null
+}
+
+variable "launch_template_use_name_prefix" {
+  description = "Determines whether to use `launch_template_name` as is or create a unique name beginning with the `launch_template_name` as the prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "launch_template_description" {
+  description = "Description of the launch template"
+  type        = string
+  default     = null
+}
+
+variable "launch_template_default_version" {
+  description = "Default Version of the launch template"
+  type        = string
+  default     = null
+}
+
+variable "update_launch_template_default_version" {
+  description = "Whether to update Default Version each update. Conflicts with `launch_template_default_version`"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "disable_api_termination" {
+  description = "If true, enables EC2 instance termination protection"
+  type        = bool
+  default     = null
+}
+
+variable "instance_initiated_shutdown_behavior" {
+  description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`)"
+  type        = string
+  default     = null
+}
+
+variable "kernel_id" {
+  description = "The kernel ID"
+  type        = string
+  default     = null
+}
+
+variable "ram_disk_id" {
+  description = "The ID of the ram disk"
+  type        = string
+  default     = null
+}
+
+variable "block_device_mappings" {
+  description = "Specify volumes to attach to the instance besides the volumes specified by the AMI"
+  type = map(object({
+    device_name = optional(string)
+    ebs = optional(object({
+      delete_on_termination      = optional(bool)
+      encrypted                  = optional(bool)
+      iops                       = optional(number)
+      kms_key_id                 = optional(string)
+      snapshot_id                = optional(string)
+      throughput                 = optional(number)
+      volume_initialization_rate = optional(number)
+      volume_size                = optional(number)
+      volume_type                = optional(string)
+    }))
+    no_device    = optional(string)
+    virtual_name = optional(string)
+  }))
+  default = null
+}
+
+variable "capacity_reservation_specification" {
+  description = "Targeting for EC2 capacity reservations"
+  type = object({
+    capacity_reservation_preference = optional(string)
+    capacity_reservation_target = optional(object({
+      capacity_reservation_id                 = optional(string)
+      capacity_reservation_resource_group_arn = optional(string)
+    }))
+  })
+  default = null
+}
+
+variable "cpu_options" {
+  description = "The CPU options for the instance"
+  type = object({
+    amd_sev_snp      = optional(string)
+    core_count       = optional(number)
+    threads_per_core = optional(number)
+  })
+  default = null
+}
+
+variable "credit_specification" {
+  description = "Customize the credit specification of the instance"
+  type = object({
+    cpu_credits = optional(string)
+  })
+  default = null
+}
+
+variable "enclave_options" {
+  description = "Enable Nitro Enclaves on launched instances"
+  type = object({
+    enabled = optional(bool)
+  })
+  default = null
+}
+
+variable "instance_market_options" {
+  description = "The market (purchasing) option for the instance"
+  type = object({
+    market_type = optional(string)
+    spot_options = optional(object({
+      block_duration_minutes         = optional(number)
+      instance_interruption_behavior = optional(string)
+      max_price                      = optional(string)
+      spot_instance_type             = optional(string)
+      valid_until                    = optional(string)
+    }))
+  })
+  default = null
+}
+
+variable "maintenance_options" {
+  description = "The maintenance options for the instance"
+  type = object({
+    auto_recovery = optional(string)
+  })
+  default = null
+}
+
+variable "license_specifications" {
+  description = "A list of license specifications to associate with"
+  type = list(object({
+    license_configuration_arn = string
+  }))
+  default = null
+}
+
+variable "network_interfaces" {
+  description = "Customize network interfaces to be attached at instance boot time"
+  type = list(object({
+    associate_carrier_ip_address = optional(bool)
+    associate_public_ip_address  = optional(bool)
+    connection_tracking_specification = optional(object({
+      tcp_established_timeout = optional(number)
+      udp_stream_timeout      = optional(number)
+      udp_timeout             = optional(number)
+    }))
+    delete_on_termination = optional(bool)
+    description           = optional(string)
+    device_index          = optional(number)
+    ena_srd_specification = optional(object({
+      ena_srd_enabled = optional(bool)
+      ena_srd_udp_specification = optional(object({
+        ena_srd_udp_enabled = optional(bool)
+      }))
+    }))
+    interface_type       = optional(string)
+    ipv4_address_count   = optional(number)
+    ipv4_addresses       = optional(list(string))
+    ipv4_prefix_count    = optional(number)
+    ipv4_prefixes        = optional(list(string))
+    ipv6_address_count   = optional(number)
+    ipv6_addresses       = optional(list(string))
+    ipv6_prefix_count    = optional(number)
+    ipv6_prefixes        = optional(list(string))
+    network_card_index   = optional(number)
+    network_interface_id = optional(string)
+    primary_ipv6         = optional(bool)
+    private_ip_address   = optional(string)
+    security_groups      = optional(list(string), [])
+    subnet_id            = optional(string)
+  }))
+  default  = []
+  nullable = false
+}
+
+variable "placement" {
+  description = "The placement of the instance"
+  type = object({
+    affinity                = optional(string)
+    availability_zone       = optional(string)
+    group_name              = optional(string)
+    host_id                 = optional(string)
+    host_resource_group_arn = optional(string)
+    partition_number        = optional(number)
+    spread_domain           = optional(string)
+    tenancy                 = optional(string)
+  })
+  default = null
+}
+
+variable "create_placement_group" {
+  description = "Determines whether a placement group is created & used by the node group"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "private_dns_name_options" {
+  description = "The options for the instance hostname. The default values are inherited from the subnet"
+  type = object({
+    enable_resource_name_dns_aaaa_record = optional(bool)
+    enable_resource_name_dns_a_record    = optional(bool)
+    hostname_type                        = optional(string)
+  })
+  default = null
+}
+
+variable "ebs_optimized" {
+  description = "If true, the launched EC2 instance will be EBS-optimized"
+  type        = bool
+  default     = null
+}
+
+variable "ami_id" {
+  description = "The AMI from which to launch the instance"
+  type        = string
+  default     = ""
+  nullable    = false
+}
+
+variable "ami_type" {
+  description = "Type of Amazon Machine Image (AMI) associated with the node group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values"
+  type        = string
+  default     = "AL2023_x86_64_STANDARD"
+  nullable    = false
+}
+
+variable "kubernetes_version" {
+  description = "Kubernetes cluster version - used to lookup default AMI ID if one is not provided"
+  type        = string
+  default     = null
+}
+
+variable "instance_requirements" {
+  description = "The attribute requirements for the type of instance. If present then `instance_type` cannot be present"
+  type = object({
+    accelerator_count = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    accelerator_manufacturers = optional(list(string))
+    accelerator_names         = optional(list(string))
+    accelerator_total_memory_mib = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    accelerator_types      = optional(list(string))
+    allowed_instance_types = optional(list(string))
+    bare_metal             = optional(string)
+    baseline_ebs_bandwidth_mbps = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    burstable_performance                                   = optional(string)
+    cpu_manufacturers                                       = optional(list(string))
+    excluded_instance_types                                 = optional(list(string))
+    instance_generations                                    = optional(list(string))
+    local_storage                                           = optional(string)
+    local_storage_types                                     = optional(list(string))
+    max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)
+    memory_gib_per_vcpu = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    memory_mib = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    network_bandwidth_gbps = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    network_interface_count = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    on_demand_max_price_percentage_over_lowest_price = optional(number)
+    require_hibernate_support                        = optional(bool)
+    spot_max_price_percentage_over_lowest_price      = optional(number)
+    total_local_storage_gb = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+    vcpu_count = optional(object({
+      max = optional(number)
+      min = optional(number)
+    }))
+  })
+  default = null
+}
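+
+# Illustrative example only (values are assumptions, not module defaults) of how
+# `instance_requirements` might be supplied when using attribute-based instance
+# type selection:
+#
+#   instance_requirements = {
+#     cpu_manufacturers = ["intel"]
+#     memory_mib        = { min = 8192 }
+#     vcpu_count        = { min = 2, max = 8 }
+#   }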
+
+variable "instance_type" {
+  description = "The type of the instance to launch"
+  type        = string
+  default     = "m6i.large"
+  nullable    = false
+}
+
+variable "key_name" {
+  description = "The key name that should be used for the instance"
+  type        = string
+  default     = null
+}
+
+variable "vpc_security_group_ids" {
+  description = "A list of security group IDs to associate"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "cluster_primary_security_group_id" {
+  description = "The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service"
+  type        = string
+  default     = null
+}
+
+variable "enable_monitoring" {
+  description = "Enables/disables detailed monitoring"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "enable_efa_support" {
+  description = "Determines whether to enable Elastic Fabric Adapter (EFA) support"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "enable_efa_only" {
+  description = "Determines whether to enable EFA (`false`, default) or EFA and EFA-only (`true`) network interfaces. Note: requires vpc-cni version `v1.18.4` or later"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "efa_indices" {
+  description = "The indices of the network interfaces that should be EFA-enabled. Only valid when `enable_efa_support` = `true`"
+  type        = list(number)
+  default     = [0]
+  nullable    = false
+}
+
+variable "metadata_options" {
+  description = "Customize the metadata options for the instance"
+  type = object({
+    http_endpoint               = optional(string, "enabled")
+    http_protocol_ipv6          = optional(string)
+    http_put_response_hop_limit = optional(number, 1)
+    http_tokens                 = optional(string, "required")
+    instance_metadata_tags      = optional(string)
+  })
+  default = {
+    http_endpoint               = "enabled"
+    http_put_response_hop_limit = 1
+    http_tokens                 = "required"
+  }
+  nullable = false
+}
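+
+# Example only: instance metadata tags could be enabled while the hardened
+# defaults above (IMDSv2 required, hop limit 1) are retained via the optional()
+# attribute defaults. The value shown is illustrative:
+#
+#   metadata_options = {
+#     instance_metadata_tags = "enabled"
+#   }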
+
+variable "launch_template_tags" {
+  description = "A map of additional tags to add to the tag_specifications of launch template created"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+variable "tag_specifications" {
+  description = "The tags to apply to the resources during launch"
+  type        = list(string)
+  default     = ["instance", "volume", "network-interface"]
+  nullable    = false
+}
+
+################################################################################
+# Autoscaling group
+################################################################################
+
+variable "create_autoscaling_group" {
+  description = "Determines whether to create autoscaling group or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "name" {
+  description = "Name of the Self managed Node Group"
+  type        = string
+  default     = ""
+}
+
+variable "use_name_prefix" {
+  description = "Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "launch_template_version" {
+  description = "Launch template version. Can be version number, `$Latest`, or `$Default`"
+  type        = string
+  default     = null
+}
+
+variable "availability_zones" {
+  description = "A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids`"
+  type        = list(string)
+  default     = null
+}
+
+variable "subnet_ids" {
+  description = "A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`"
+  type        = list(string)
+  default     = null
+}
+
+variable "min_size" {
+  description = "The minimum size of the autoscaling group"
+  type        = number
+  default     = 1
+  nullable    = false
+}
+
+variable "max_size" {
+  description = "The maximum size of the autoscaling group"
+  type        = number
+  default     = 3
+  nullable    = false
+}
+
+variable "desired_size" {
+  description = "The number of Amazon EC2 instances that should be running in the autoscaling group"
+  type        = number
+  default     = 1
+  nullable    = false
+}
+
+variable "desired_size_type" {
+  description = "The unit of measurement for the value specified for `desired_size`. Supported for attribute-based instance type selection only. Valid values: `units`, `vcpu`, `memory-mib`"
+  type        = string
+  default     = null
+}
+
+variable "ignore_failed_scaling_activities" {
+  description = "Whether to ignore failed Auto Scaling scaling activities while waiting for capacity"
+  type        = bool
+  default     = null
+}
+
+variable "context" {
+  description = "Reserved"
+  type        = string
+  default     = null
+}
+
+variable "capacity_rebalance" {
+  description = "Indicates whether capacity rebalance is enabled"
+  type        = bool
+  default     = null
+}
+
+variable "default_instance_warmup" {
+  description = "Amount of time, in seconds, until a newly launched instance can contribute to the Amazon CloudWatch metrics. This delay lets an instance finish initializing before Amazon EC2 Auto Scaling aggregates instance metrics, resulting in more reliable usage data"
+  type        = number
+  default     = null
+}
+
+variable "protect_from_scale_in" {
+  description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "placement_group" {
+  description = "The name of the placement group into which you'll launch your instances"
+  type        = string
+  default     = null
+}
+
+variable "health_check_type" {
+  description = "`EC2` or `ELB`. Controls how health checking is done"
+  type        = string
+  default     = null
+}
+
+variable "health_check_grace_period" {
+  description = "Time (in seconds) after instance comes into service before checking health"
+  type        = number
+  default     = null
+}
+
+variable "force_delete" {
+  description = "Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling"
+  type        = bool
+  default     = null
+}
+
+variable "termination_policies" {
+  description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "suspended_processes" {
+  description = "A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "max_instance_lifetime" {
+  description = "The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds"
+  type        = number
+  default     = null
+}
+
+variable "enabled_metrics" {
+  description = "A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`"
+  type        = list(string)
+  default     = []
+  nullable    = false
+}
+
+variable "metrics_granularity" {
+  description = "The granularity to associate with the metrics to collect. The only valid value is `1Minute`"
+  type        = string
+  default     = null
+}
+
+variable "initial_lifecycle_hooks" {
+  description = "One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource"
+  type = list(object({
+    default_result          = optional(string)
+    heartbeat_timeout       = optional(number)
+    lifecycle_transition    = string
+    name                    = string
+    notification_metadata   = optional(string)
+    notification_target_arn = optional(string)
+    role_arn                = optional(string)
+  }))
+  default = null
+}
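+
+# A minimal sketch of a single lifecycle hook entry; the hook name and timeout
+# are hypothetical placeholders:
+#
+#   initial_lifecycle_hooks = [{
+#     name                 = "ExampleStartupHook"
+#     lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
+#     default_result       = "CONTINUE"
+#     heartbeat_timeout    = 300
+#   }]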
+
+variable "instance_maintenance_policy" {
+  description = "If this block is configured, add a instance maintenance policy to the specified Auto Scaling group"
+  type = object({
+    max_healthy_percentage = number
+    min_healthy_percentage = number
+  })
+  default = null
+}
+
+variable "instance_refresh" {
+  description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated"
+  type = object({
+    preferences = optional(object({
+      alarm_specification = optional(object({
+        alarms = optional(list(string))
+      }))
+      auto_rollback                = optional(bool)
+      checkpoint_delay             = optional(number)
+      checkpoint_percentages       = optional(list(number))
+      instance_warmup              = optional(number)
+      max_healthy_percentage       = optional(number)
+      min_healthy_percentage       = optional(number, 33)
+      scale_in_protected_instances = optional(string)
+      skip_matching                = optional(bool)
+      standby_instances            = optional(string)
+    }))
+    strategy = optional(string, "Rolling")
+    triggers = optional(list(string))
+  })
+  default = {
+    strategy = "Rolling"
+    preferences = {
+      min_healthy_percentage = 66
+    }
+  }
+  nullable = false
+}
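+
+# Illustrative override of the default instance refresh behavior; the
+# percentages and delay are example values, not recommendations:
+#
+#   instance_refresh = {
+#     strategy = "Rolling"
+#     preferences = {
+#       min_healthy_percentage = 90
+#       checkpoint_percentages = [50, 100]
+#       checkpoint_delay       = 600
+#     }
+#   }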
+
+variable "use_mixed_instances_policy" {
+  description = "Determines whether to use a mixed instances policy in the autoscaling group or not"
+  type        = bool
+  default     = false
+  nullable    = false
+}
+
+variable "mixed_instances_policy" {
+  description = "Configuration block containing settings to define launch targets for Auto Scaling groups"
+  type = object({
+    instances_distribution = optional(object({
+      on_demand_allocation_strategy            = optional(string)
+      on_demand_base_capacity                  = optional(number)
+      on_demand_percentage_above_base_capacity = optional(number)
+      spot_allocation_strategy                 = optional(string)
+      spot_instance_pools                      = optional(number)
+      spot_max_price                           = optional(string)
+    }))
+    launch_template = object({
+      override = optional(list(object({
+        instance_requirements = optional(object({
+          accelerator_count = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          accelerator_manufacturers = optional(list(string))
+          accelerator_names         = optional(list(string))
+          accelerator_total_memory_mib = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          accelerator_types      = optional(list(string))
+          allowed_instance_types = optional(list(string))
+          bare_metal             = optional(string)
+          baseline_ebs_bandwidth_mbps = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          burstable_performance                                   = optional(string)
+          cpu_manufacturers                                       = optional(list(string))
+          excluded_instance_types                                 = optional(list(string))
+          instance_generations                                    = optional(list(string))
+          local_storage                                           = optional(string)
+          local_storage_types                                     = optional(list(string))
+          max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)
+          memory_gib_per_vcpu = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          memory_mib = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          network_bandwidth_gbps = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          network_interface_count = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          on_demand_max_price_percentage_over_lowest_price = optional(number)
+          require_hibernate_support                        = optional(bool)
+          spot_max_price_percentage_over_lowest_price      = optional(number)
+          total_local_storage_gb = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+          vcpu_count = optional(object({
+            max = optional(number)
+            min = optional(number)
+          }))
+        }))
+        instance_type = optional(string)
+        launch_template_specification = optional(object({
+          launch_template_id   = optional(string)
+          launch_template_name = optional(string)
+          version              = optional(string)
+        }))
+        weighted_capacity = optional(string)
+      })))
+    })
+  })
+  default = null
+}
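+
+# A minimal sketch of a mixed instances policy splitting capacity between
+# on-demand and Spot across two instance types; all values are illustrative:
+#
+#   mixed_instances_policy = {
+#     instances_distribution = {
+#       on_demand_base_capacity                  = 1
+#       on_demand_percentage_above_base_capacity = 25
+#       spot_allocation_strategy                 = "price-capacity-optimized"
+#     }
+#     launch_template = {
+#       override = [
+#         { instance_type = "m6i.large" },
+#         { instance_type = "m5.large" },
+#       ]
+#     }
+#   }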
+
+variable "timeouts" {
+  description = "Timeout configurations for the autoscaling group"
+  type = object({
+    delete = optional(string)
+  })
+  default = null
+}
+
+variable "autoscaling_group_tags" {
+  description = "A map of additional tags to add to the autoscaling group created. Tags are applied to the autoscaling group only and are NOT propagated to instances"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+variable "create_iam_instance_profile" {
+  description = "Determines whether an IAM instance profile is created or to use an existing IAM instance profile"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_instance_profile_arn" {
+  description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false`"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_name" {
+  description = "Name to use on IAM role created"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_use_name_prefix" {
+  description = "Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_path" {
+  description = "IAM role path"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_description" {
+  description = "Description of the role"
+  type        = string
+  default     = "Self managed node group IAM role"
+  nullable    = false
+}
+
+variable "iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_attach_cni_policy" {
+  description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_additional_policies" {
+  description = "Additional policies to be added to the IAM role"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+variable "iam_role_tags" {
+  description = "A map of additional tags to add to the IAM role created"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
+
+################################################################################
+# IAM Role Policy
+################################################################################
+
+variable "create_iam_role_policy" {
+  description = "Determines whether an IAM role policy is created or not"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_policy_statements" {
+  description = "A list of IAM policy [statements](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document#statement) - used for adding specific IAM permissions as needed"
+  type = list(object({
+    sid           = optional(string)
+    actions       = optional(list(string))
+    not_actions   = optional(list(string))
+    effect        = optional(string)
+    resources     = optional(list(string))
+    not_resources = optional(list(string))
+    principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    not_principals = optional(list(object({
+      type        = string
+      identifiers = list(string)
+    })))
+    condition = optional(list(object({
+      test     = string
+      values   = list(string)
+      variable = string
+    })))
+  }))
+  default = null
+}
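+
+# Illustrative statement granting read access to a hypothetical S3 bucket; the
+# sid, actions, and ARN are placeholders:
+#
+#   iam_role_policy_statements = [{
+#     sid       = "ExampleS3Read"
+#     effect    = "Allow"
+#     actions   = ["s3:GetObject"]
+#     resources = ["arn:aws:s3:::example-bucket/*"]
+#   }]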
+
+################################################################################
+# Access Entry
+################################################################################
+
+variable "create_access_entry" {
+  description = "Determines whether an access entry is created for the IAM role used by the node group"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "iam_role_arn" {
+  description = "ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false`"
+  type        = string
+  default     = null
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+variable "create_security_group" {
+  description = "Determines if a security group is created"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "security_group_name" {
+  description = "Name to use on security group created"
+  type        = string
+  default     = null
+}
+
+variable "security_group_use_name_prefix" {
+  description = "Determines whether the security group name (`security_group_name`) is used as a prefix"
+  type        = bool
+  default     = true
+  nullable    = false
+}
+
+variable "security_group_description" {
+  description = "Description of the security group created"
+  type        = string
+  default     = null
+}
+
+variable "security_group_ingress_rules" {
+  description = "Security group ingress rules to add to the security group created"
+  type = map(object({
+    name = optional(string)
+
+    cidr_ipv4                    = optional(string)
+    cidr_ipv6                    = optional(string)
+    description                  = optional(string)
+    from_port                    = optional(string)
+    ip_protocol                  = optional(string, "tcp")
+    prefix_list_id               = optional(string)
+    referenced_security_group_id = optional(string)
+    self                         = optional(bool, false)
+    tags                         = optional(map(string), {})
+    to_port                      = optional(string)
+  }))
+  default  = {}
+  nullable = false
+}
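+
+# Example of a single ingress rule keyed by an arbitrary map key; the CIDR and
+# ports are illustrative assumptions (note `from_port`/`to_port` are strings):
+#
+#   security_group_ingress_rules = {
+#     example_https = {
+#       description = "HTTPS from the VPC"
+#       cidr_ipv4   = "10.0.0.0/16"
+#       from_port   = "443"
+#       to_port     = "443"
+#       ip_protocol = "tcp"
+#     }
+#   }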
+
+variable "security_group_egress_rules" {
+  description = "Security group egress rules to add to the security group created"
+  type = map(object({
+    name = optional(string)
+
+    cidr_ipv4                    = optional(string)
+    cidr_ipv6                    = optional(string)
+    description                  = optional(string)
+    from_port                    = optional(string)
+    ip_protocol                  = optional(string, "tcp")
+    prefix_list_id               = optional(string)
+    referenced_security_group_id = optional(string)
+    self                         = optional(bool, false)
+    tags                         = optional(map(string), {})
+    to_port                      = optional(string)
+  }))
+  default  = {}
+  nullable = false
+}
+
+variable "security_group_tags" {
+  description = "A map of additional tags to add to the security group created"
+  type        = map(string)
+  default     = {}
+  nullable    = false
+}
diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/modules/self-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/node_groups.tf b/node_groups.tf
index d98979310f..5a1655613e 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -1,25 +1,536 @@
-module "node_groups" {
-  source                               = "./modules/node_groups"
-  create_eks                           = var.create_eks
-  cluster_name                         = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-  default_iam_role_arn                 = coalescelist(aws_iam_role.workers[*].arn, [""])[0]
-  workers_group_defaults               = local.workers_group_defaults
-  worker_security_group_id             = local.worker_security_group_id
-  worker_additional_security_group_ids = var.worker_additional_security_group_ids
-  tags                                 = var.tags
-  node_groups_defaults                 = var.node_groups_defaults
-  node_groups                          = var.node_groups
-
-  # Hack to ensure ordering of resource creation.
-  # This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2
-  # Do not create node_groups before other resources are ready and removes race conditions
-  # Ensure these resources are created before "unlocking" the data source.
-  # Will be removed in Terraform 0.13
-  ng_depends_on = [
-    aws_eks_cluster.this,
-    kubernetes_config_map.aws_auth,
-    aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
-    aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
-    aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly
-  ]
+locals {
+  kubernetes_network_config = try(aws_eks_cluster.this[0].kubernetes_network_config[0], {})
+}
+
+# This sleep resource is used to provide a timed gap between the cluster creation and the downstream dependencies
+# that consume the outputs from here. Any of the values that are used as triggers can be used in dependencies
+# to ensure that the downstream resources are created after both the cluster is ready and the sleep time has passed.
+# This was primarily added to give addons that need to be configured BEFORE data plane compute resources
+# enough time to create and configure themselves before the data plane compute resources are created.
+resource "time_sleep" "this" {
+  count = var.create ? 1 : 0
+
+  create_duration = var.dataplane_wait_duration
+
+  triggers = {
+    name               = aws_eks_cluster.this[0].id
+    endpoint           = aws_eks_cluster.this[0].endpoint
+    kubernetes_version = aws_eks_cluster.this[0].version
+    service_cidr       = var.ip_family == "ipv6" ? try(local.kubernetes_network_config.service_ipv6_cidr, "") : try(local.kubernetes_network_config.service_ipv4_cidr, "")
+
+    certificate_authority_data = aws_eks_cluster.this[0].certificate_authority[0].data
+  }
+}
+
+################################################################################
+# EKS IPV6 CNI Policy
+# https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy
+################################################################################
+
+data "aws_iam_policy_document" "cni_ipv6_policy" {
+  count = var.create && var.create_cni_ipv6_iam_policy ? 1 : 0
+
+  statement {
+    sid = "AssignDescribe"
+    actions = [
+      "ec2:AssignIpv6Addresses",
+      "ec2:DescribeInstances",
+      "ec2:DescribeTags",
+      "ec2:DescribeNetworkInterfaces",
+      "ec2:DescribeInstanceTypes"
+    ]
+    resources = ["*"]
+  }
+
+  statement {
+    sid       = "CreateTags"
+    actions   = ["ec2:CreateTags"]
+    resources = ["arn:${local.partition}:ec2:*:*:network-interface/*"]
+  }
+}
+
+# Note - we are keeping this to a minimum in hopes that it's soon replaced with an AWS managed policy like `AmazonEKS_CNI_Policy`
+resource "aws_iam_policy" "cni_ipv6_policy" {
+  count = var.create && var.create_cni_ipv6_iam_policy ? 1 : 0
+
+  # Will cause conflicts if trying to create on multiple clusters but necessary to reference by exact name in sub-modules
+  name        = "AmazonEKS_CNI_IPv6_Policy"
+  description = "IAM policy for EKS CNI to assign IPV6 addresses"
+  policy      = data.aws_iam_policy_document.cni_ipv6_policy[0].json
+
+  tags = var.tags
+}
+
+################################################################################
+# Node Security Group
+# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
+# Plus NTP/HTTPS (otherwise nodes fail to launch)
+################################################################################
+
+locals {
+  node_sg_name   = coalesce(var.node_security_group_name, "${var.name}-node")
+  create_node_sg = var.create && var.create_node_security_group
+
+  node_security_group_id = local.create_node_sg ? aws_security_group.node[0].id : var.node_security_group_id
+
+  node_security_group_rules = {
+    ingress_cluster_443 = {
+      description                   = "Cluster API to node groups"
+      protocol                      = "tcp"
+      from_port                     = 443
+      to_port                       = 443
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    ingress_cluster_kubelet = {
+      description                   = "Cluster API to node kubelets"
+      protocol                      = "tcp"
+      from_port                     = 10250
+      to_port                       = 10250
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    ingress_self_coredns_tcp = {
+      description = "Node to node CoreDNS"
+      protocol    = "tcp"
+      from_port   = 53
+      to_port     = 53
+      type        = "ingress"
+      self        = true
+    }
+    ingress_self_coredns_udp = {
+      description = "Node to node CoreDNS UDP"
+      protocol    = "udp"
+      from_port   = 53
+      to_port     = 53
+      type        = "ingress"
+      self        = true
+    }
+  }
+
+  node_security_group_recommended_rules = { for k, v in {
+    ingress_nodes_ephemeral = {
+      description = "Node to node ingress on ephemeral ports"
+      protocol    = "tcp"
+      from_port   = 1025
+      to_port     = 65535
+      type        = "ingress"
+      self        = true
+    }
+    # metrics-server
+    ingress_cluster_4443_webhook = {
+      description                   = "Cluster API to node 4443/tcp webhook"
+      protocol                      = "tcp"
+      from_port                     = 4443
+      to_port                       = 4443
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    # prometheus-adapter
+    ingress_cluster_6443_webhook = {
+      description                   = "Cluster API to node 6443/tcp webhook"
+      protocol                      = "tcp"
+      from_port                     = 6443
+      to_port                       = 6443
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    # Karpenter
+    ingress_cluster_8443_webhook = {
+      description                   = "Cluster API to node 8443/tcp webhook"
+      protocol                      = "tcp"
+      from_port                     = 8443
+      to_port                       = 8443
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    # ALB controller, NGINX
+    ingress_cluster_9443_webhook = {
+      description                   = "Cluster API to node 9443/tcp webhook"
+      protocol                      = "tcp"
+      from_port                     = 9443
+      to_port                       = 9443
+      type                          = "ingress"
+      source_cluster_security_group = true
+    }
+    egress_all = {
+      description      = "Allow all egress"
+      protocol         = "-1"
+      from_port        = 0
+      to_port          = 0
+      type             = "egress"
+      cidr_blocks      = ["0.0.0.0/0"]
+      ipv6_cidr_blocks = var.ip_family == "ipv6" ? ["::/0"] : null
+    }
+  } : k => v if var.node_security_group_enable_recommended_rules }
+}
+
+resource "aws_security_group" "node" {
+  count = local.create_node_sg ? 1 : 0
+
+  region = var.region
+
+  name        = var.node_security_group_use_name_prefix ? null : local.node_sg_name
+  name_prefix = var.node_security_group_use_name_prefix ? "${local.node_sg_name}${var.prefix_separator}" : null
+  description = var.node_security_group_description
+  vpc_id      = var.vpc_id
+
+  tags = merge(
+    var.tags,
+    {
+      "Name"                              = local.node_sg_name
+      "kubernetes.io/cluster/${var.name}" = "owned"
+    },
+    var.node_security_group_tags
+  )
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_security_group_rule" "node" {
+  for_each = { for k, v in merge(
+    local.node_security_group_rules,
+    local.node_security_group_recommended_rules,
+    var.node_security_group_additional_rules,
+  ) : k => v if local.create_node_sg }
+
+  region = var.region
+
+  security_group_id        = aws_security_group.node[0].id
+  protocol                 = each.value.protocol
+  from_port                = each.value.from_port
+  to_port                  = each.value.to_port
+  type                     = each.value.type
+  description              = try(each.value.description, null)
+  cidr_blocks              = try(each.value.cidr_blocks, null)
+  ipv6_cidr_blocks         = try(each.value.ipv6_cidr_blocks, null)
+  prefix_list_ids          = try(each.value.prefix_list_ids, null)
+  self                     = try(each.value.self, null)
+  source_security_group_id = try(each.value.source_cluster_security_group, false) ? local.security_group_id : try(each.value.source_security_group_id, null)
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+module "fargate_profile" {
+  source = "./modules/fargate-profile"
+
+  for_each = var.create && !local.create_outposts_local_cluster && var.fargate_profiles != null ? var.fargate_profiles : {}
+
+  create = each.value.create
+
+  region = var.region
+
+  # Pass through values to reduce GET requests from data sources
+  partition  = local.partition
+  account_id = local.account_id
+
+  # Fargate Profile
+  cluster_name      = time_sleep.this[0].triggers["name"]
+  cluster_ip_family = var.ip_family
+  name              = coalesce(each.value.name, each.key)
+  subnet_ids        = coalesce(each.value.subnet_ids, var.subnet_ids)
+  selectors         = each.value.selectors
+  timeouts          = each.value.timeouts
+
+  # IAM role
+  create_iam_role               = each.value.create_iam_role
+  iam_role_arn                  = each.value.iam_role_arn
+  iam_role_name                 = each.value.iam_role_name
+  iam_role_use_name_prefix      = each.value.iam_role_use_name_prefix
+  iam_role_path                 = each.value.iam_role_path
+  iam_role_description          = each.value.iam_role_description
+  iam_role_permissions_boundary = each.value.iam_role_permissions_boundary
+  iam_role_tags                 = each.value.iam_role_tags
+  iam_role_attach_cni_policy    = each.value.iam_role_attach_cni_policy
+  iam_role_additional_policies  = lookup(each.value, "iam_role_additional_policies", null)
+  create_iam_role_policy        = each.value.create_iam_role_policy
+  iam_role_policy_statements    = each.value.iam_role_policy_statements
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+module "eks_managed_node_group" {
+  source = "./modules/eks-managed-node-group"
+
+  for_each = var.create && !local.create_outposts_local_cluster && var.eks_managed_node_groups != null ? var.eks_managed_node_groups : {}
+
+  create = each.value.create
+
+  region = var.region
+
+  # Pass through values to reduce GET requests from data sources
+  partition  = local.partition
+  account_id = local.account_id
+
+  cluster_name       = time_sleep.this[0].triggers["name"]
+  kubernetes_version = each.value.kubernetes_version != null ? each.value.kubernetes_version : time_sleep.this[0].triggers["kubernetes_version"]
+
+
+  # EKS Managed Node Group
+  name            = coalesce(each.value.name, each.key)
+  use_name_prefix = each.value.use_name_prefix
+
+  subnet_ids = coalesce(each.value.subnet_ids, var.subnet_ids)
+
+  min_size     = each.value.min_size
+  max_size     = each.value.max_size
+  desired_size = each.value.desired_size
+
+  ami_id                         = each.value.ami_id
+  ami_type                       = each.value.ami_type
+  ami_release_version            = each.value.ami_release_version
+  use_latest_ami_release_version = each.value.use_latest_ami_release_version
+
+  capacity_type        = each.value.capacity_type
+  disk_size            = each.value.disk_size
+  force_update_version = each.value.force_update_version
+  instance_types       = each.value.instance_types
+  labels               = each.value.labels
+  node_repair_config   = each.value.node_repair_config
+  remote_access        = each.value.remote_access
+  taints               = each.value.taints
+  update_config        = each.value.update_config
+  timeouts             = each.value.timeouts
+
+  # User data
+  cluster_endpoint           = try(time_sleep.this[0].triggers["endpoint"], "")
+  cluster_auth_base64        = try(time_sleep.this[0].triggers["certificate_authority_data"], "")
+  cluster_ip_family          = var.ip_family
+  cluster_service_cidr       = try(time_sleep.this[0].triggers["service_cidr"], "")
+  enable_bootstrap_user_data = each.value.enable_bootstrap_user_data
+  pre_bootstrap_user_data    = each.value.pre_bootstrap_user_data
+  post_bootstrap_user_data   = each.value.post_bootstrap_user_data
+  bootstrap_extra_args       = each.value.bootstrap_extra_args
+  user_data_template_path    = each.value.user_data_template_path
+  cloudinit_pre_nodeadm      = each.value.cloudinit_pre_nodeadm
+  cloudinit_post_nodeadm     = each.value.cloudinit_post_nodeadm
+
+  # Launch Template
+  create_launch_template                 = each.value.create_launch_template
+  use_custom_launch_template             = each.value.use_custom_launch_template
+  launch_template_id                     = each.value.launch_template_id
+  launch_template_name                   = coalesce(each.value.launch_template_name, each.key)
+  launch_template_use_name_prefix        = each.value.launch_template_use_name_prefix
+  launch_template_version                = each.value.launch_template_version
+  launch_template_default_version        = each.value.launch_template_default_version
+  update_launch_template_default_version = each.value.update_launch_template_default_version
+  launch_template_description            = coalesce(each.value.launch_template_description, "Custom launch template for ${coalesce(each.value.name, each.key)} EKS managed node group")
+  launch_template_tags                   = each.value.launch_template_tags
+  tag_specifications                     = each.value.tag_specifications
+
+  ebs_optimized           = each.value.ebs_optimized
+  key_name                = each.value.key_name
+  disable_api_termination = each.value.disable_api_termination
+  kernel_id               = each.value.kernel_id
+  ram_disk_id             = each.value.ram_disk_id
+
+  block_device_mappings              = each.value.block_device_mappings
+  capacity_reservation_specification = each.value.capacity_reservation_specification
+  cpu_options                        = each.value.cpu_options
+  credit_specification               = each.value.credit_specification
+  enclave_options                    = each.value.enclave_options
+  instance_market_options            = each.value.instance_market_options
+  license_specifications             = each.value.license_specifications
+  metadata_options                   = each.value.metadata_options
+  enable_monitoring                  = each.value.enable_monitoring
+  enable_efa_support                 = each.value.enable_efa_support
+  enable_efa_only                    = each.value.enable_efa_only
+  efa_indices                        = each.value.efa_indices
+  create_placement_group             = each.value.create_placement_group
+  placement                          = each.value.placement
+  network_interfaces                 = each.value.network_interfaces
+  maintenance_options                = each.value.maintenance_options
+  private_dns_name_options           = each.value.private_dns_name_options
+
+  # IAM role
+  create_iam_role               = each.value.create_iam_role
+  iam_role_arn                  = each.value.iam_role_arn
+  iam_role_name                 = each.value.iam_role_name
+  iam_role_use_name_prefix      = each.value.iam_role_use_name_prefix
+  iam_role_path                 = each.value.iam_role_path
+  iam_role_description          = each.value.iam_role_description
+  iam_role_permissions_boundary = each.value.iam_role_permissions_boundary
+  iam_role_tags                 = each.value.iam_role_tags
+  iam_role_attach_cni_policy    = each.value.iam_role_attach_cni_policy
+  iam_role_additional_policies  = lookup(each.value, "iam_role_additional_policies", null)
+  create_iam_role_policy        = each.value.create_iam_role_policy
+  iam_role_policy_statements    = each.value.iam_role_policy_statements
+
+  # Security group
+  vpc_security_group_ids            = compact(concat([local.node_security_group_id], each.value.vpc_security_group_ids))
+  cluster_primary_security_group_id = each.value.attach_cluster_primary_security_group ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null
+  create_security_group             = each.value.create_security_group
+  security_group_name               = each.value.security_group_name
+  security_group_use_name_prefix    = each.value.security_group_use_name_prefix
+  security_group_description        = each.value.security_group_description
+  security_group_ingress_rules      = each.value.security_group_ingress_rules
+  security_group_egress_rules       = each.value.security_group_egress_rules
+  security_group_tags               = each.value.security_group_tags
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+module "self_managed_node_group" {
+  source = "./modules/self-managed-node-group"
+
+  for_each = var.create && var.self_managed_node_groups != null ? var.self_managed_node_groups : {}
+
+  create = each.value.create
+
+  region = var.region
+
+  # Pass through values to reduce GET requests from data sources
+  partition  = local.partition
+  account_id = local.account_id
+
+  cluster_name = time_sleep.this[0].triggers["name"]
+
+  # Autoscaling Group
+  create_autoscaling_group = each.value.create_autoscaling_group
+
+  name            = coalesce(each.value.name, each.key)
+  use_name_prefix = each.value.use_name_prefix
+
+  availability_zones = each.value.availability_zones
+  subnet_ids         = coalesce(each.value.subnet_ids, var.subnet_ids)
+
+  min_size                = each.value.min_size
+  max_size                = each.value.max_size
+  desired_size            = each.value.desired_size
+  desired_size_type       = each.value.desired_size_type
+  capacity_rebalance      = each.value.capacity_rebalance
+  default_instance_warmup = each.value.default_instance_warmup
+  protect_from_scale_in   = each.value.protect_from_scale_in
+  context                 = each.value.context
+
+  create_placement_group    = each.value.create_placement_group
+  placement_group           = each.value.placement_group
+  health_check_type         = each.value.health_check_type
+  health_check_grace_period = each.value.health_check_grace_period
+
+  ignore_failed_scaling_activities = each.value.ignore_failed_scaling_activities
+
+  force_delete          = each.value.force_delete
+  termination_policies  = each.value.termination_policies
+  suspended_processes   = each.value.suspended_processes
+  max_instance_lifetime = each.value.max_instance_lifetime
+
+  enabled_metrics     = each.value.enabled_metrics
+  metrics_granularity = each.value.metrics_granularity
+
+  initial_lifecycle_hooks     = each.value.initial_lifecycle_hooks
+  instance_maintenance_policy = each.value.instance_maintenance_policy
+  instance_refresh            = each.value.instance_refresh
+  use_mixed_instances_policy  = each.value.use_mixed_instances_policy
+  mixed_instances_policy      = each.value.mixed_instances_policy
+
+  timeouts               = each.value.timeouts
+  autoscaling_group_tags = each.value.autoscaling_group_tags
+
+  # User data
+  ami_type                   = each.value.ami_type
+  cluster_endpoint           = try(time_sleep.this[0].triggers["endpoint"], "")
+  cluster_auth_base64        = try(time_sleep.this[0].triggers["certificate_authority_data"], "")
+  cluster_service_cidr       = try(time_sleep.this[0].triggers["service_cidr"], "")
+  additional_cluster_dns_ips = each.value.additional_cluster_dns_ips
+  cluster_ip_family          = var.ip_family
+  pre_bootstrap_user_data    = each.value.pre_bootstrap_user_data
+  post_bootstrap_user_data   = each.value.post_bootstrap_user_data
+  bootstrap_extra_args       = each.value.bootstrap_extra_args
+  user_data_template_path    = each.value.user_data_template_path
+  cloudinit_pre_nodeadm      = each.value.cloudinit_pre_nodeadm
+  cloudinit_post_nodeadm     = each.value.cloudinit_post_nodeadm
+
+  # Launch Template
+  create_launch_template                 = each.value.create_launch_template
+  launch_template_id                     = each.value.launch_template_id
+  launch_template_name                   = coalesce(each.value.launch_template_name, each.key)
+  launch_template_use_name_prefix        = each.value.launch_template_use_name_prefix
+  launch_template_version                = each.value.launch_template_version
+  launch_template_default_version        = each.value.launch_template_default_version
+  update_launch_template_default_version = each.value.update_launch_template_default_version
+  launch_template_description            = coalesce(each.value.launch_template_description, "Custom launch template for ${coalesce(each.value.name, each.key)} self managed node group")
+  launch_template_tags                   = each.value.launch_template_tags
+  tag_specifications                     = each.value.tag_specifications
+
+  ebs_optimized      = each.value.ebs_optimized
+  ami_id             = each.value.ami_id
+  kubernetes_version = each.value.kubernetes_version != null ? each.value.kubernetes_version : time_sleep.this[0].triggers["kubernetes_version"]
+  instance_type      = each.value.instance_type
+  key_name           = each.value.key_name
+
+  disable_api_termination              = each.value.disable_api_termination
+  instance_initiated_shutdown_behavior = each.value.instance_initiated_shutdown_behavior
+  kernel_id                            = each.value.kernel_id
+  ram_disk_id                          = each.value.ram_disk_id
+
+  block_device_mappings              = each.value.block_device_mappings
+  capacity_reservation_specification = each.value.capacity_reservation_specification
+  cpu_options                        = each.value.cpu_options
+  credit_specification               = each.value.credit_specification
+  enclave_options                    = each.value.enclave_options
+  instance_requirements              = each.value.instance_requirements
+  instance_market_options            = each.value.instance_market_options
+  license_specifications             = each.value.license_specifications
+  metadata_options                   = each.value.metadata_options
+  enable_monitoring                  = each.value.enable_monitoring
+  enable_efa_support                 = each.value.enable_efa_support
+  enable_efa_only                    = each.value.enable_efa_only
+  efa_indices                        = each.value.efa_indices
+  network_interfaces                 = each.value.network_interfaces
+  placement                          = each.value.placement
+  maintenance_options                = each.value.maintenance_options
+  private_dns_name_options           = each.value.private_dns_name_options
+
+  # IAM role
+  create_iam_instance_profile   = each.value.create_iam_instance_profile
+  iam_instance_profile_arn      = each.value.iam_instance_profile_arn
+  iam_role_name                 = each.value.iam_role_name
+  iam_role_use_name_prefix      = each.value.iam_role_use_name_prefix
+  iam_role_path                 = each.value.iam_role_path
+  iam_role_description          = each.value.iam_role_description
+  iam_role_permissions_boundary = each.value.iam_role_permissions_boundary
+  iam_role_tags                 = each.value.iam_role_tags
+  iam_role_attach_cni_policy    = each.value.iam_role_attach_cni_policy
+  iam_role_additional_policies  = lookup(each.value, "iam_role_additional_policies", null)
+  create_iam_role_policy        = each.value.create_iam_role_policy
+  iam_role_policy_statements    = each.value.iam_role_policy_statements
+
+  # Access entry
+  create_access_entry = each.value.create_access_entry
+  iam_role_arn        = each.value.iam_role_arn
+
+  # Security group
+  vpc_security_group_ids            = compact(concat([local.node_security_group_id], each.value.vpc_security_group_ids))
+  cluster_primary_security_group_id = each.value.attach_cluster_primary_security_group ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null
+  create_security_group             = each.value.create_security_group
+  security_group_name               = each.value.security_group_name
+  security_group_use_name_prefix    = each.value.security_group_use_name_prefix
+  security_group_description        = each.value.security_group_description
+  security_group_ingress_rules      = each.value.security_group_ingress_rules
+  security_group_egress_rules       = each.value.security_group_egress_rules
+  security_group_tags               = each.value.security_group_tags
+
+  tags = merge(
+    var.tags,
+    each.value.tags,
+  )
 }
diff --git a/outputs.tf b/outputs.tf
index f6c53513d9..4663b83639 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,211 +1,280 @@
-output "cluster_id" {
-  description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready."
-  value       = element(concat(aws_eks_cluster.this.*.id, [""]), 0)
+locals {
 
-  # So that calling plans wait for the cluster to be available before attempting to use it.
-  # There is no need to duplicate this datasource
-  depends_on = [data.http.wait_for_cluster]
 }
 
+################################################################################
+# Cluster
+################################################################################
+
 output "cluster_arn" {
-  description = "The Amazon Resource Name (ARN) of the cluster."
-  value       = element(concat(aws_eks_cluster.this.*.arn, [""]), 0)
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = try(aws_eks_cluster.this[0].arn, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }
 
 output "cluster_certificate_authority_data" {
-  description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster."
-  value       = element(concat(aws_eks_cluster.this[*].certificate_authority[0].data, [""]), 0)
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = try(aws_eks_cluster.this[0].certificate_authority[0].data, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }
 
 output "cluster_endpoint" {
-  description = "The endpoint for your EKS Kubernetes API."
-  value       = element(concat(aws_eks_cluster.this.*.endpoint, [""]), 0)
+  description = "Endpoint for your Kubernetes API server"
+  value       = try(aws_eks_cluster.this[0].endpoint, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }
 
-output "cluster_version" {
-  description = "The Kubernetes server version for the EKS cluster."
-  value       = element(concat(aws_eks_cluster.this[*].version, [""]), 0)
+output "cluster_id" {
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = try(aws_eks_cluster.this[0].cluster_id, "")
 }
 
-output "cluster_security_group_id" {
-  description = "Security group ID attached to the EKS cluster. On 1.14 or later, this is the 'Additional security groups' in the EKS console."
-  value       = local.cluster_security_group_id
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = try(aws_eks_cluster.this[0].name, "")
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }
 
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = kubernetes_config_map.aws_auth.*
+output "cluster_oidc_issuer_url" {
+  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null)
 }
 
-output "cluster_iam_role_name" {
-  description = "IAM role name of the EKS cluster."
-  value       = local.cluster_iam_role_name
+output "cluster_dualstack_oidc_issuer_url" {
+  description = "Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider"
+  # https://github.com/aws/containers-roadmap/issues/2038#issuecomment-2278450601
+  value = try(replace(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://oidc.eks.", "https://oidc-eks."), ".amazonaws.com/", ".api.aws/"), null)
 }
 
-output "cluster_iam_role_arn" {
-  description = "IAM role ARN of the EKS cluster."
-  value       = local.cluster_iam_role_arn
+output "cluster_version" {
+  description = "The Kubernetes version for the cluster"
+  value       = try(aws_eks_cluster.this[0].version, null)
 }
 
-output "cluster_oidc_issuer_url" {
-  description = "The URL on the EKS cluster OIDC Issuer"
-  value       = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc.0.issuer, [""]))[0]
+output "cluster_platform_version" {
+  description = "Platform version for the cluster"
+  value       = try(aws_eks_cluster.this[0].platform_version, null)
+}
+
+output "cluster_status" {
+  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+  value       = try(aws_eks_cluster.this[0].status, null)
 }
 
 output "cluster_primary_security_group_id" {
-  description = "The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console."
-  value       = local.cluster_primary_security_group_id
+  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+  value       = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null)
 }
 
-output "cloudwatch_log_group_name" {
-  description = "Name of cloudwatch log group created"
-  value       = element(concat(aws_cloudwatch_log_group.this[*].name, [""]), 0)
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = var.ip_family == "ipv6" ? try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv6_cidr, null) : try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, null)
 }
 
-output "cloudwatch_log_group_arn" {
-  description = "Arn of cloudwatch log group created"
-  value       = element(concat(aws_cloudwatch_log_group.this[*].arn, [""]), 0)
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = try(aws_eks_cluster.this[0].kubernetes_network_config[0].ip_family, null)
 }
 
-output "kubeconfig" {
-  description = "kubectl config file contents for this EKS cluster. Will block on cluster creation until the cluster is really ready."
-  value       = local.kubeconfig
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = aws_eks_access_entry.this
+}
 
-  # So that calling plans wait for the cluster to be available before attempting to use it.
-  # There is no need to duplicate this datasource
-  depends_on = [data.http.wait_for_cluster]
+output "access_policy_associations" {
+  description = "Map of eks cluster access policy associations created and their attributes"
+  value       = aws_eks_access_policy_association.this
 }
 
-output "kubeconfig_filename" {
-  description = "The filename of the generated kubectl config. Will block on cluster creation until the cluster is really ready."
-  value       = concat(local_file.kubeconfig.*.filename, [""])[0]
+################################################################################
+# KMS Key
+################################################################################
 
-  # So that calling plans wait for the cluster to be available before attempting to use it.
-  # There is no need to duplicate this datasource
-  depends_on = [data.http.wait_for_cluster]
+output "kms_key_arn" {
+  description = "The Amazon Resource Name (ARN) of the key"
+  value       = module.kms.key_arn
 }
 
-output "oidc_provider_arn" {
-  description = "The ARN of the OIDC Provider if `enable_irsa = true`."
-  value       = var.enable_irsa ? concat(aws_iam_openid_connect_provider.oidc_provider[*].arn, [""])[0] : null
+output "kms_key_id" {
+  description = "The globally unique identifier for the key"
+  value       = module.kms.key_id
+}
+
+output "kms_key_policy" {
+  description = "The IAM resource policy set on the key"
+  value       = module.kms.key_policy
 }
 
-output "workers_asg_arns" {
-  description = "IDs of the autoscaling groups containing workers."
-  value = concat(
-    aws_autoscaling_group.workers.*.arn,
-    aws_autoscaling_group.workers_launch_template.*.arn,
-  )
+################################################################################
+# Cluster Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the cluster security group"
+  value       = try(aws_security_group.cluster[0].arn, null)
 }
 
-output "workers_asg_names" {
-  description = "Names of the autoscaling groups containing workers."
-  value = concat(
-    aws_autoscaling_group.workers.*.id,
-    aws_autoscaling_group.workers_launch_template.*.id,
-  )
+output "cluster_security_group_id" {
+  description = "ID of the cluster security group"
+  value       = try(aws_security_group.cluster[0].id, null)
 }
 
-output "workers_user_data" {
-  description = "User data of worker groups"
-  value = concat(
-    local.userdata_rendered,
-    local.launch_template_userdata_rendered,
-  )
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the node shared security group"
+  value       = try(aws_security_group.node[0].arn, null)
 }
 
-output "workers_default_ami_id" {
-  description = "ID of the default worker group AMI"
-  value       = local.default_ami_id_linux
+output "node_security_group_id" {
+  description = "ID of the node shared security group"
+  value       = try(aws_security_group.node[0].id, null)
 }
 
-output "workers_default_ami_id_windows" {
-  description = "ID of the default Windows worker group AMI"
-  value       = local.default_ami_id_windows
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider" {
+  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
+  value       = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), null)
 }
 
-output "workers_launch_template_ids" {
-  description = "IDs of the worker launch templates."
-  value       = aws_launch_template.workers_launch_template.*.id
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+  value       = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, null)
 }
 
-output "workers_launch_template_arns" {
-  description = "ARNs of the worker launch templates."
-  value       = aws_launch_template.workers_launch_template.*.arn
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, null)
 }
 
-output "workers_launch_template_latest_versions" {
-  description = "Latest versions of the worker launch templates."
-  value       = aws_launch_template.workers_launch_template.*.latest_version
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+  description = "Cluster IAM role name"
+  value       = try(aws_iam_role.this[0].name, null)
 }
 
-output "worker_security_group_id" {
-  description = "Security group ID attached to the EKS workers."
-  value       = local.worker_security_group_id
+output "cluster_iam_role_arn" {
+  description = "Cluster IAM role ARN"
+  value       = try(aws_iam_role.this[0].arn, null)
 }
 
-output "worker_iam_instance_profile_arns" {
-  description = "default IAM instance profile ARN for EKS worker groups"
-  value = concat(
-    aws_iam_instance_profile.workers.*.arn,
-    aws_iam_instance_profile.workers_launch_template.*.arn
-  )
+output "cluster_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = try(aws_iam_role.this[0].unique_id, null)
 }
 
-output "worker_iam_instance_profile_names" {
-  description = "default IAM instance profile name for EKS worker groups"
-  value = concat(
-    aws_iam_instance_profile.workers.*.name,
-    aws_iam_instance_profile.workers_launch_template.*.name
-  )
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "EKS Auto node IAM role name"
+  value       = try(aws_iam_role.eks_auto[0].name, null)
 }
 
-output "worker_iam_role_name" {
-  description = "default IAM role name for EKS worker groups"
-  value = coalescelist(
-    aws_iam_role.workers.*.name,
-    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
-    data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
-    [""]
-  )[0]
+output "node_iam_role_arn" {
+  description = "EKS Auto node IAM role ARN"
+  value       = try(aws_iam_role.eks_auto[0].arn, null)
 }
 
-output "worker_iam_role_arn" {
-  description = "default IAM role ARN for EKS worker groups"
-  value = coalescelist(
-    aws_iam_role.workers.*.arn,
-    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_arn,
-    data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_arn,
-    [""]
-  )[0]
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = try(aws_iam_role.eks_auto[0].unique_id, null)
 }
 
-output "fargate_profile_ids" {
-  description = "EKS Cluster name and EKS Fargate Profile names separated by a colon (:)."
-  value       = module.fargate.fargate_profile_ids
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+  description = "Map of attribute maps for all EKS cluster addons enabled"
+  value       = merge(aws_eks_addon.this, aws_eks_addon.before_compute)
 }
 
-output "fargate_profile_arns" {
-  description = "Amazon Resource Name (ARN) of the EKS Fargate Profiles."
-  value       = module.fargate.fargate_profile_arns
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+  description = "Map of attribute maps for all EKS identity providers enabled"
+  value       = aws_eks_identity_provider_config.this
 }
 
-output "fargate_iam_role_name" {
-  description = "IAM role name for EKS Fargate pods"
-  value       = module.fargate.iam_role_name
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+  description = "Name of cloudwatch log group created"
+  value       = try(aws_cloudwatch_log_group.this[0].name, null)
+}
+
+output "cloudwatch_log_group_arn" {
+  description = "Arn of cloudwatch log group created"
+  value       = try(aws_cloudwatch_log_group.this[0].arn, null)
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+  description = "Map of attribute maps for all EKS Fargate Profiles created"
+  value       = module.fargate_profile
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+  description = "Map of attribute maps for all EKS managed node groups created"
+  value       = module.eks_managed_node_group
 }
 
-output "fargate_iam_role_arn" {
-  description = "IAM role ARN for EKS Fargate pods"
-  value       = module.fargate.iam_role_arn
+output "eks_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by EKS managed node groups"
+  value       = compact(flatten([for group in module.eks_managed_node_group : group.node_group_autoscaling_group_names]))
 }
 
-output "node_groups" {
-  description = "Outputs from EKS node groups. Map of maps, keyed by var.node_groups keys"
-  value       = module.node_groups.node_groups
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+  description = "Map of attribute maps for all self managed node groups created"
+  value       = module.self_managed_node_group
 }
 
-output "security_group_rule_cluster_https_worker_ingress" {
-  description = "Security group rule responsible for allowing pods to communicate with the EKS cluster API."
-  value       = aws_security_group_rule.cluster_https_worker_ingress
+output "self_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by self-managed node groups"
+  value       = compact([for group in module.self_managed_node_group : group.autoscaling_group_name])
 }
diff --git a/templates/al2023_user_data.tpl b/templates/al2023_user_data.tpl
new file mode 100644
index 0000000000..cc360e6d65
--- /dev/null
+++ b/templates/al2023_user_data.tpl
@@ -0,0 +1,11 @@
+%{ if enable_bootstrap_user_data ~}
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ${cluster_name}
+    apiServerEndpoint: ${cluster_endpoint}
+    certificateAuthority: ${cluster_auth_base64}
+    cidr: ${cluster_service_cidr}
+%{ endif ~}
diff --git a/templates/al2_user_data.tpl b/templates/al2_user_data.tpl
new file mode 100644
index 0000000000..d75d549ccc
--- /dev/null
+++ b/templates/al2_user_data.tpl
@@ -0,0 +1,12 @@
+%{ if enable_bootstrap_user_data ~}
+#!/bin/bash
+set -e
+%{ endif ~}
+${pre_bootstrap_user_data ~}
+%{ if enable_bootstrap_user_data ~}
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+  --ip-family ${cluster_ip_family} --service-${cluster_ip_family}-cidr ${cluster_service_cidr}
+${post_bootstrap_user_data ~}
+%{ endif ~}
diff --git a/templates/bottlerocket_user_data.tpl b/templates/bottlerocket_user_data.tpl
new file mode 100644
index 0000000000..666d666069
--- /dev/null
+++ b/templates/bottlerocket_user_data.tpl
@@ -0,0 +1,8 @@
+%{ if enable_bootstrap_user_data ~}
+[settings.kubernetes]
+"cluster-name" = "${cluster_name}"
+"api-server" = "${cluster_endpoint}"
+"cluster-certificate" = "${cluster_auth_base64}"
+"cluster-dns-ip" = ${cluster_dns_ips}
+%{ endif ~}
+${bootstrap_extra_args ~}
diff --git a/templates/kubeconfig.tpl b/templates/kubeconfig.tpl
deleted file mode 100644
index a99a0dfa8f..0000000000
--- a/templates/kubeconfig.tpl
+++ /dev/null
@@ -1,38 +0,0 @@
-apiVersion: v1
-preferences: {}
-kind: Config
-
-clusters:
-- cluster:
-    server: ${endpoint}
-    certificate-authority-data: ${cluster_auth_base64}
-  name: ${kubeconfig_name}
-
-contexts:
-- context:
-    cluster: ${kubeconfig_name}
-    user: ${kubeconfig_name}
-  name: ${kubeconfig_name}
-
-current-context: ${kubeconfig_name}
-
-users:
-- name: ${kubeconfig_name}
-  user:
-    exec:
-      apiVersion: client.authentication.k8s.io/v1alpha1
-      command: ${aws_authenticator_command}
-      args:
-%{~ for i in aws_authenticator_command_args }
-        - "${i}"
-%{~ endfor ~}
-%{ for i in aws_authenticator_additional_args }
-        - ${i}
-%{~ endfor ~}
-%{ if length(aws_authenticator_env_variables) > 0 }
-      env:
-  %{~ for k, v in aws_authenticator_env_variables ~}
-        - name: ${k}
-          value: ${v}
-  %{~ endfor ~}
-%{ endif }
diff --git a/templates/userdata.sh.tpl b/templates/userdata.sh.tpl
deleted file mode 100644
index cf314b8800..0000000000
--- a/templates/userdata.sh.tpl
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash -e
-
-# Allow user supplied pre userdata code
-${pre_userdata}
-
-# Bootstrap and join the cluster
-/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
-
-# Allow user supplied userdata code
-${additional_userdata}
diff --git a/templates/userdata_windows.tpl b/templates/userdata_windows.tpl
deleted file mode 100644
index 61be8e8b11..0000000000
--- a/templates/userdata_windows.tpl
+++ /dev/null
@@ -1,11 +0,0 @@
-<powershell>
-${pre_userdata}
-
-[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
-[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
-[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
-& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -KubeletExtraArgs '${kubelet_extra_args}' 3>&1 4>&1 5>&1 6>&1
-$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
-
-${additional_userdata}
-</powershell>
diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl
new file mode 100644
index 0000000000..9721d3cc33
--- /dev/null
+++ b/templates/windows_user_data.tpl
@@ -0,0 +1,13 @@
+%{ if enable_bootstrap_user_data ~}
+<powershell>
+%{ endif ~}
+${pre_bootstrap_user_data ~}
+%{ if enable_bootstrap_user_data ~}
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+${post_bootstrap_user_data ~}
+</powershell>
+%{ endif ~}
diff --git a/tests/eks-fargate-profile/README.md b/tests/eks-fargate-profile/README.md
new file mode 100644
index 0000000000..2696041cea
--- /dev/null
+++ b/tests/eks-fargate-profile/README.md
@@ -0,0 +1,91 @@
+# EKS Fargate Profile
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
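+A common next step (not part of this test) is to point the `kubernetes` provider at the cluster using the module outputs. The snippet below is a minimal sketch that assumes the AWS CLI is available on the machine running Terraform:
+
+```hcl
+provider "kubernetes" {
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "aws"
+    # Fetches a short-lived authentication token for the cluster via the AWS CLI
+    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
+  }
+}
+```
+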
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_disabled_fargate_profile"></a> [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_fargate_profile"></a> [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_access_entries"></a> [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes |
+| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| <a name="output_cluster_dualstack_oidc_issuer_url"></a> [cluster\_dualstack\_oidc\_issuer\_url](#output\_cluster\_dualstack\_oidc\_issuer\_url) | Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | Cluster IAM role ARN |
+| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | Cluster IAM role name |
+| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
+| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| <a name="output_cluster_ip_family"></a> [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) |
+| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
+| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| <a name="output_cluster_service_cidr"></a> [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from |
+| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
+| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
+| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
+| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
+| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | EKS Auto node IAM role ARN |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | EKS Auto node IAM role name |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
+| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
+<!-- END_TF_DOCS -->
diff --git a/tests/eks-fargate-profile/main.tf b/tests/eks-fargate-profile/main.tf
new file mode 100644
index 0000000000..795e736fc7
--- /dev/null
+++ b/tests/eks-fargate-profile/main.tf
@@ -0,0 +1,165 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name               = "ex-${basename(path.cwd)}"
+  kubernetes_version = "1.33"
+  region             = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name                   = local.name
+  kubernetes_version     = local.kubernetes_version
+  endpoint_public_access = true
+
+  addons = {
+    kube-proxy = {}
+    vpc-cni    = {}
+    coredns = {
+      configuration_values = jsonencode({
+        computeType = "fargate"
+      })
+    }
+  }
+
+  vpc_id                   = module.vpc.vpc_id
+  subnet_ids               = module.vpc.private_subnets
+  control_plane_subnet_ids = module.vpc.intra_subnets
+
+  # Fargate profiles use the cluster primary security group so these are not utilized
+  create_security_group      = false
+  create_node_security_group = false
+
+  fargate_profiles = {
+    example = {
+      name = "example"
+      selectors = [
+        {
+          namespace = "backend"
+          labels = {
+            Application = "backend"
+          }
+        },
+        {
+          namespace = "app-*"
+          labels = {
+            Application = "app-wildcard"
+          }
+        }
+      ]
+
+      iam_role_additional_policies = {
+        additional = aws_iam_policy.additional.arn
+      }
+
+      # Using specific subnets instead of the subnets supplied for the cluster itself
+      subnet_ids = [module.vpc.private_subnets[1]]
+
+      tags = {
+        Owner = "secondary"
+      }
+    }
+    kube-system = {
+      selectors = [
+        { namespace = "kube-system" }
+      ]
+    }
+  }
+
+  tags = local.tags
+}
+
+################################################################################
+# Sub-Module Usage on Existing/Separate Cluster
+################################################################################
+
+module "fargate_profile" {
+  source = "../../modules/fargate-profile"
+
+  name         = "separate-fargate-profile"
+  cluster_name = module.eks.cluster_name
+
+  subnet_ids = module.vpc.private_subnets
+  selectors = [{
+    namespace = "kube-system"
+  }]
+
+  tags = merge(local.tags, { Separate = "fargate-profile" })
+}
+
+module "disabled_fargate_profile" {
+  source = "../../modules/fargate-profile"
+
+  create = false
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
+
+resource "aws_iam_policy" "additional" {
+  name = "${local.name}-additional"
+
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "ec2:Describe*",
+        ]
+        Effect   = "Allow"
+        Resource = "*"
+      },
+    ]
+  })
+}
diff --git a/tests/eks-fargate-profile/outputs.tf b/tests/eks-fargate-profile/outputs.tf
new file mode 100644
index 0000000000..9ed8c27220
--- /dev/null
+++ b/tests/eks-fargate-profile/outputs.tf
@@ -0,0 +1,245 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = module.eks.cluster_id
+}
+
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = module.eks.cluster_name
+}
+
+output "cluster_oidc_issuer_url" {
+  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_dualstack_oidc_issuer_url" {
+  description = "Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_dualstack_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+  description = "Platform version for the cluster"
+  value       = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+  value       = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+  value       = module.eks.cluster_primary_security_group_id
+}
+
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = module.eks.cluster_service_cidr
+}
+
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = module.eks.cluster_ip_family
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = module.eks.access_entries
+}
+
+################################################################################
+# KMS Key
+################################################################################
+
+output "kms_key_arn" {
+  description = "The Amazon Resource Name (ARN) of the key"
+  value       = module.eks.kms_key_arn
+}
+
+output "kms_key_id" {
+  description = "The globally unique identifier for the key"
+  value       = module.eks.kms_key_id
+}
+
+output "kms_key_policy" {
+  description = "The IAM resource policy set on the key"
+  value       = module.eks.kms_key_policy
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the cluster security group"
+  value       = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+  description = "ID of the cluster security group"
+  value       = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the node shared security group"
+  value       = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+  description = "ID of the node shared security group"
+  value       = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider" {
+  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
+  value       = module.eks.oidc_provider
+}
+
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+  value       = module.eks.oidc_provider_arn
+}
+
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = module.eks.cluster_tls_certificate_sha1_fingerprint
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+  description = "Cluster IAM role name"
+  value       = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+  description = "Cluster IAM role ARN"
+  value       = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "EKS Auto node IAM role name"
+  value       = module.eks.node_iam_role_name
+}
+
+output "node_iam_role_arn" {
+  description = "EKS Auto node IAM role ARN"
+  value       = module.eks.node_iam_role_arn
+}
+
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.node_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+  description = "Map of attribute maps for all EKS cluster addons enabled"
+  value       = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+  description = "Map of attribute maps for all EKS identity providers enabled"
+  value       = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+  description = "Name of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+  description = "Arn of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+  description = "Map of attribute maps for all EKS Fargate Profiles created"
+  value       = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+  description = "Map of attribute maps for all EKS managed node groups created"
+  value       = module.eks.eks_managed_node_groups
+}
+
+output "eks_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by EKS managed node groups"
+  value       = module.eks.eks_managed_node_groups_autoscaling_group_names
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+  description = "Map of attribute maps for all self managed node groups created"
+  value       = module.eks.self_managed_node_groups
+}
+
+output "self_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by self-managed node groups"
+  value       = module.eks.self_managed_node_groups_autoscaling_group_names
+}
diff --git a/tests/eks-fargate-profile/variables.tf b/tests/eks-fargate-profile/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/eks-fargate-profile/versions.tf b/tests/eks-fargate-profile/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/tests/eks-fargate-profile/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/tests/eks-hybrid-nodes/README.md b/tests/eks-hybrid-nodes/README.md
new file mode 100644
index 0000000000..efe26912ce
--- /dev/null
+++ b/tests/eks-hybrid-nodes/README.md
@@ -0,0 +1,65 @@
+# EKS Hybrid Node IAM Role
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
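+The roles created by this module are typically registered with a cluster through an EKS access entry of type `HYBRID_LINUX`. The snippet below is a minimal sketch of that wiring only (cluster configuration omitted):
+
+```hcl
+module "eks" {
+  source = "terraform-aws-modules/eks/aws"
+
+  # ... cluster configuration omitted ...
+
+  access_entries = {
+    hybrid-nodes = {
+      type          = "HYBRID_LINUX"
+      principal_arn = module.eks_hybrid_node_role.arn
+    }
+  }
+}
+```
+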
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 4.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 4.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_disabled_eks_hybrid_node_role"></a> [disabled\_eks\_hybrid\_node\_role](#module\_disabled\_eks\_hybrid\_node\_role) | ../../modules/hybrid-node-role | n/a |
+| <a name="module_eks_hybrid_node_role"></a> [eks\_hybrid\_node\_role](#module\_eks\_hybrid\_node\_role) | ../../modules/hybrid-node-role | n/a |
+| <a name="module_ira_eks_hybrid_node_role"></a> [ira\_eks\_hybrid\_node\_role](#module\_ira\_eks\_hybrid\_node\_role) | ../../modules/hybrid-node-role | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [tls_private_key.example](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
+| [tls_self_signed_cert.example](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/self_signed_cert) | resource |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_arn"></a> [arn](#output\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_intermediate_role_arn"></a> [intermediate\_role\_arn](#output\_intermediate\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_intermediate_role_name"></a> [intermediate\_role\_name](#output\_intermediate\_role\_name) | The name of the node IAM role |
+| <a name="output_intermediate_role_unique_id"></a> [intermediate\_role\_unique\_id](#output\_intermediate\_role\_unique\_id) | Stable and unique string identifying the node IAM role |
+| <a name="output_ira_arn"></a> [ira\_arn](#output\_ira\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_ira_intermediate_role_arn"></a> [ira\_intermediate\_role\_arn](#output\_ira\_intermediate\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role |
+| <a name="output_ira_intermediate_role_name"></a> [ira\_intermediate\_role\_name](#output\_ira\_intermediate\_role\_name) | The name of the node IAM role |
+| <a name="output_ira_intermediate_role_unique_id"></a> [ira\_intermediate\_role\_unique\_id](#output\_ira\_intermediate\_role\_unique\_id) | Stable and unique string identifying the node IAM role |
+| <a name="output_ira_name"></a> [ira\_name](#output\_ira\_name) | The name of the node IAM role |
+| <a name="output_ira_unique_id"></a> [ira\_unique\_id](#output\_ira\_unique\_id) | Stable and unique string identifying the node IAM role |
+| <a name="output_name"></a> [name](#output\_name) | The name of the node IAM role |
+| <a name="output_unique_id"></a> [unique\_id](#output\_unique\_id) | Stable and unique string identifying the node IAM role |
+<!-- END_TF_DOCS -->
diff --git a/tests/eks-hybrid-nodes/main.tf b/tests/eks-hybrid-nodes/main.tf
new file mode 100644
index 0000000000..ec49725102
--- /dev/null
+++ b/tests/eks-hybrid-nodes/main.tf
@@ -0,0 +1,84 @@
+provider "aws" {
+  region = local.region
+}
+
+locals {
+  name   = "ex-${basename(path.cwd)}"
+  region = "us-west-2"
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# Hybrid Node IAM Module
+################################################################################
+
+# Default (SSM)
+module "eks_hybrid_node_role" {
+  source = "../../modules/hybrid-node-role"
+
+  policy_statements = [
+    {
+      actions = [
+        "s3:Get*",
+        "s3:List*",
+      ]
+      resources = ["*"]
+    }
+  ]
+
+  tags = local.tags
+}
+
+# IAM Roles Anywhere
+module "ira_eks_hybrid_node_role" {
+  source = "../../modules/hybrid-node-role"
+
+  name = "${local.name}-ira"
+
+  enable_ira = true
+
+  ira_trust_anchor_source_type           = "CERTIFICATE_BUNDLE"
+  ira_trust_anchor_x509_certificate_data = local.cert_data
+
+  tags = local.tags
+}
+
+module "disabled_eks_hybrid_node_role" {
+  source = "../../modules/hybrid-node-role"
+
+  create = false
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+resource "tls_private_key" "example" {
+  algorithm = "RSA"
+  rsa_bits  = 4096
+}
+
+resource "tls_self_signed_cert" "example" {
+  private_key_pem = tls_private_key.example.private_key_pem
+
+  subject {
+    common_name  = "Custom root"
+    organization = "ACME Examples, Inc"
+  }
+
+  validity_period_hours = 17544
+  is_ca_certificate     = true
+
+  allowed_uses = [
+    "cert_signing",
+  ]
+}
+
+locals {
+  cert_data = trimspace(replace(trimprefix(tls_self_signed_cert.example.cert_pem, "-----BEGIN CERTIFICATE-----"), "-----END CERTIFICATE-----", ""))
+}
diff --git a/tests/eks-hybrid-nodes/outputs.tf b/tests/eks-hybrid-nodes/outputs.tf
new file mode 100644
index 0000000000..ffea7ccbed
--- /dev/null
+++ b/tests/eks-hybrid-nodes/outputs.tf
@@ -0,0 +1,71 @@
+################################################################################
+# Default (SSM) - Node IAM Role
+################################################################################
+
+# Node IAM Role
+output "name" {
+  description = "The name of the node IAM role"
+  value       = module.eks_hybrid_node_role.name
+}
+
+output "arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = module.eks_hybrid_node_role.arn
+}
+
+output "unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = module.eks_hybrid_node_role.unique_id
+}
+
+# Intermediate IAM Role
+output "intermediate_role_name" {
+  description = "The name of the node IAM role"
+  value       = module.eks_hybrid_node_role.intermediate_role_name
+}
+
+output "intermediate_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = module.eks_hybrid_node_role.intermediate_role_arn
+}
+
+output "intermediate_role_unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = module.eks_hybrid_node_role.intermediate_role_unique_id
+}
+
+################################################################################
+# IAM Roles Anywhere - Node IAM Role
+################################################################################
+
+# Node IAM Role
+output "ira_name" {
+  description = "The name of the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.name
+}
+
+output "ira_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.arn
+}
+
+output "ira_unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.unique_id
+}
+
+# Intermediate IAM Role
+output "ira_intermediate_role_name" {
+  description = "The name of the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.intermediate_role_name
+}
+
+output "ira_intermediate_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.intermediate_role_arn
+}
+
+output "ira_intermediate_role_unique_id" {
+  description = "Stable and unique string identifying the node IAM role"
+  value       = module.ira_eks_hybrid_node_role.intermediate_role_unique_id
+}
diff --git a/tests/eks-hybrid-nodes/variables.tf b/tests/eks-hybrid-nodes/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/eks-hybrid-nodes/versions.tf b/tests/eks-hybrid-nodes/versions.tf
new file mode 100644
index 0000000000..3c2ec900b1
--- /dev/null
+++ b/tests/eks-hybrid-nodes/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = ">= 4.0"
+    }
+  }
+}
diff --git a/tests/eks-managed-node-group/README.md b/tests/eks-managed-node-group/README.md
new file mode 100644
index 0000000000..15a05fa9e6
--- /dev/null
+++ b/tests/eks-managed-node-group/README.md
@@ -0,0 +1,101 @@
+# EKS Managed Node Group
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
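+The `eks_managed_node_groups_autoscaling_group_names` output can be consumed by other resources. As an illustrative (hypothetical) example, a scheduled scale-down of every node group's autoscaling group outside working hours could look like:
+
+```hcl
+resource "aws_autoscaling_schedule" "scale_down_evenings" {
+  for_each = toset(module.eks.eks_managed_node_groups_autoscaling_group_names)
+
+  scheduled_action_name  = "scale-down"
+  autoscaling_group_name = each.value
+
+  recurrence       = "0 20 * * *" # 20:00 UTC daily
+  min_size         = 0
+  desired_capacity = 0
+  max_size         = -1 # -1 leaves the current max size unchanged
+}
+```
+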
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_aws_vpc_cni_ipv6_pod_identity"></a> [aws\_vpc\_cni\_ipv6\_pod\_identity](#module\_aws\_vpc\_cni\_ipv6\_pod\_identity) | terraform-aws-modules/eks-pod-identity/aws | ~> 1.6 |
+| <a name="module_disabled_eks"></a> [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
+| <a name="module_disabled_eks_managed_node_group"></a> [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| <a name="module_ebs_kms_key"></a> [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 4.0 |
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_eks_managed_node_group"></a> [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
+| <a name="module_key_pair"></a> [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_access_entries"></a> [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes |
+| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| <a name="output_cluster_dualstack_oidc_issuer_url"></a> [cluster\_dualstack\_oidc\_issuer\_url](#output\_cluster\_dualstack\_oidc\_issuer\_url) | Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | Cluster IAM role ARN |
+| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | Cluster IAM role name |
+| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
+| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| <a name="output_cluster_ip_family"></a> [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) |
+| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
+| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| <a name="output_cluster_service_cidr"></a> [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from |
+| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
+| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
+| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
+| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
+| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | EKS Auto node IAM role ARN |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | EKS Auto node IAM role name |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
+| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
+<!-- END_TF_DOCS -->
diff --git a/tests/eks-managed-node-group/main.tf b/tests/eks-managed-node-group/main.tf
new file mode 100644
index 0000000000..7d8adc08e3
--- /dev/null
+++ b/tests/eks-managed-node-group/main.tf
@@ -0,0 +1,640 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_caller_identity" "current" {}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name               = "ex-${replace(basename(path.cwd), "_", "-")}"
+  kubernetes_version = "1.33"
+  region             = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name                   = local.name
+  kubernetes_version     = local.kubernetes_version
+  endpoint_public_access = true
+
+  # IPV6
+  ip_family                  = "ipv6"
+  create_cni_ipv6_iam_policy = true
+
+  enable_cluster_creator_admin_permissions = true
+
+  addons = {
+    coredns = {
+      most_recent = true
+    }
+    eks-node-monitoring-agent = {
+      most_recent = true
+    }
+    eks-pod-identity-agent = {
+      before_compute = true
+      most_recent    = true
+    }
+    kube-proxy = {
+      most_recent = true
+    }
+    vpc-cni = {
+      most_recent    = true
+      before_compute = true
+      configuration_values = jsonencode({
+        env = {
+          # Reference docs https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html
+          ENABLE_PREFIX_DELEGATION = "true"
+          WARM_PREFIX_TARGET       = "1"
+        }
+      })
+      pod_identity_association = [{
+        role_arn        = module.aws_vpc_cni_ipv6_pod_identity.iam_role_arn
+        service_account = "aws-node"
+      }]
+    }
+  }
+
+  upgrade_policy = {
+    support_type = "STANDARD"
+  }
+
+  zonal_shift_config = {
+    enabled = true
+  }
+
+  vpc_id                   = module.vpc.vpc_id
+  subnet_ids               = module.vpc.private_subnets
+  control_plane_subnet_ids = module.vpc.intra_subnets
+
+  eks_managed_node_groups = {
+    # Default node group - as provided by AWS EKS
+    default_node_group = {
+      # By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
+      # so we need to disable it to use the default template provided by the AWS EKS managed node group service
+      use_custom_launch_template = false
+
+      disk_size = 50
+
+      # Remote access cannot be specified with a launch template
+      remote_access = {
+        ec2_ssh_key               = module.key_pair.key_pair_name
+        source_security_group_ids = [aws_security_group.remote_access.id]
+      }
+    }
+
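+    # Node group using a module-created placement group, constrained to a single subnet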
+    placement_group = {
+      create_placement_group = true
+      subnet_ids             = slice(module.vpc.private_subnets, 0, 1)
+      instance_types         = ["m5.large", "m5n.large", "m5zn.large"]
+    }
+
+    # AL2023 node group using the new user data format, which relies on nodeadm
+    # to join nodes to the cluster (instead of /etc/eks/bootstrap.sh)
+    al2023_nodeadm = {
+      ami_type                       = "AL2023_x86_64_STANDARD"
+      use_latest_ami_release_version = true
+
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              kubelet:
+                config:
+                  shutdownGracePeriod: 30s
+          EOT
+        }
+      ]
+    }
+
+    # Default node group - as provided by AWS EKS using Bottlerocket
+    bottlerocket_default = {
+      # By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
+      # so we need to disable it to use the default template provided by the AWS EKS managed node group service
+      use_custom_launch_template = false
+
+      ami_type = "BOTTLEROCKET_x86_64"
+    }
+
+    # Adds to the AWS provided user data
+    bottlerocket_add = {
+      ami_type = "BOTTLEROCKET_x86_64"
+
+      use_latest_ami_release_version = true
+
+      # This will get added to what AWS provides
+      bootstrap_extra_args = <<-EOT
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+      EOT
+    }
+
+    # Custom AMI, using module provided bootstrap data
+    bottlerocket_custom = {
+      # Current bottlerocket AMI
+      ami_id   = data.aws_ami.eks_default_bottlerocket.image_id
+      ami_type = "BOTTLEROCKET_x86_64"
+
+      # Use module user data template to bootstrap
+      enable_bootstrap_user_data = true
+      # This will get added to the template
+      bootstrap_extra_args = <<-EOT
+        # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, and is explicitly disabled here.
+        [settings.host-containers.admin]
+        enabled = false
+
+        # The control host container provides out-of-band access via SSM.
+        # It is enabled by default, and can be disabled if you do not expect to use SSM.
+        # This could leave you with no way to access the API and change settings on an existing node!
+        [settings.host-containers.control]
+        enabled = true
+
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+
+        [settings.kubernetes.node-labels]
+        label1 = "foo"
+        label2 = "bar"
+
+        [settings.kubernetes.node-taints]
+        dedicated = "experimental:PreferNoSchedule"
+        special = "true:NoSchedule"
+      EOT
+    }
+
+    # Use a custom AMI
+    custom_ami = {
+      ami_type = "AL2023_ARM_64_STANDARD"
+      # Current default AMI used by managed node groups - pseudo "custom"
+      ami_id = data.aws_ami.eks_default_arm.image_id
+
+      # This will ensure the bootstrap user data is used to join the node
+      # By default, EKS managed node groups will not append the bootstrap script;
+      # this adds it back in using the default template provided by the module
+      # Note: this assumes the AMI provided is an EKS optimized AMI derivative
+      enable_bootstrap_user_data = true
+
+      instance_types = ["t4g.medium"]
+    }
+
+    # Complete
+    complete = {
+      name            = "complete-eks-mng"
+      use_name_prefix = true
+
+      subnet_ids = module.vpc.private_subnets
+
+      min_size     = 1
+      max_size     = 7
+      desired_size = 1
+
+      ami_id                     = data.aws_ami.eks_default.image_id
+      enable_bootstrap_user_data = true
+
+      cloudinit_pre_nodeadm = [{
+        content      = <<-EOT
+          ---
+          apiVersion: node.eks.aws/v1alpha1
+          kind: NodeConfig
+          spec:
+            kubelet:
+              config:
+                shutdownGracePeriod: 30s
+        EOT
+        content_type = "application/node.eks.aws"
+      }]
+
+      # This is only possible with a custom AMI or self-managed node group
+      cloudinit_post_nodeadm = [{
+        content      = <<-EOT
+          echo "All done"
+        EOT
+        content_type = "text/x-shellscript; charset=\"us-ascii\""
+      }]
+
+      capacity_type        = "SPOT"
+      force_update_version = true
+      instance_types       = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+      labels = {
+        GithubRepo = "terraform-aws-eks"
+        GithubOrg  = "terraform-aws-modules"
+      }
+
+      update_config = {
+        max_unavailable_percentage = 33 # or set `max_unavailable`
+      }
+
+      description = "EKS managed node group example launch template"
+
+      ebs_optimized           = true
+      disable_api_termination = false
+      enable_monitoring       = true
+
+      block_device_mappings = {
+        xvda = {
+          device_name = "/dev/xvda"
+          ebs = {
+            volume_size           = 75
+            volume_type           = "gp3"
+            iops                  = 3000
+            throughput            = 150
+            encrypted             = true
+            kms_key_id            = module.ebs_kms_key.key_arn
+            delete_on_termination = true
+          }
+        }
+      }
+
+      metadata_options = {
+        http_endpoint               = "enabled"
+        http_tokens                 = "required"
+        http_put_response_hop_limit = 2
+        instance_metadata_tags      = "disabled"
+      }
+
+      node_repair_config = {
+        enabled = true
+      }
+
+      create_iam_role          = true
+      iam_role_name            = "eks-managed-node-group-complete-example"
+      iam_role_use_name_prefix = false
+      iam_role_description     = "EKS managed node group complete example role"
+      iam_role_tags = {
+        Purpose = "Protector of the kubelet"
+      }
+      iam_role_additional_policies = {
+        AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+        additional                         = aws_iam_policy.node_additional.arn
+      }
+      iam_role_policy_statements = [
+        {
+          sid    = "ECRPullThroughCache"
+          effect = "Allow"
+          actions = [
+            "ecr:CreateRepository",
+            "ecr:BatchImportUpstreamImage",
+          ]
+          resources = ["*"]
+        }
+      ]
+
+      launch_template_tags = {
+        # enable discovery of autoscaling groups by cluster-autoscaler
+        "k8s.io/cluster-autoscaler/enabled" : true,
+        "k8s.io/cluster-autoscaler/${local.name}" : "owned",
+      }
+
+      tags = {
+        ExtraTag = "EKS managed node group complete example"
+      }
+    }
+
+    efa = {
+      # Disabling automatic creation due to instance type/quota availability
+      # Can be enabled when appropriate for testing/validation
+      create = false
+
+      # The EKS AL2023 NVIDIA AMI provides all of the necessary components
+      # for accelerated workloads w/ EFA
+      ami_type       = "AL2023_x86_64_NVIDIA"
+      instance_types = ["p5e.48xlarge"]
+
+      # Mount instance store volumes in RAID-0 for kubelet and containerd
+      # https://github.com/awslabs/amazon-eks-ami/blob/master/doc/USER_GUIDE.md#raid-0-for-kubelet-and-containerd-raid0
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              instance:
+                localStorage:
+                  strategy: RAID0
+          EOT
+        }
+      ]
+
+      # This will:
+      # 1. Create a placement group to place the instances close to one another
+      # 2. Create and attach the necessary security group rules (and security group)
+      # 3. Expose all of the available EFA interfaces on the launch template
+      enable_efa_support = true
+      enable_efa_only    = true
+      efa_indices        = [0, 4, 8, 12]
+
+      min_size     = 1
+      max_size     = 1
+      desired_size = 1
+
+      labels = {
+        "vpc.amazonaws.com/efa.present" = "true"
+        "nvidia.com/gpu.present"        = "true"
+      }
+
+      taints = {
+        # Ensure only GPU workloads are scheduled on this node group
+        gpu = {
+          key    = "nvidia.com/gpu"
+          value  = "true"
+          effect = "NO_SCHEDULE"
+        }
+      }
+    }
+  }
+
+  access_entries = {
+    # One access entry with a policy associated
+    ex-single = {
+      principal_arn = aws_iam_role.this["single"].arn
+
+      policy_associations = {
+        single = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+          access_scope = {
+            namespaces = ["default"]
+            type       = "namespace"
+          }
+        }
+      }
+    }
+
+    # Example of adding multiple policies to a single access entry
+    ex-multiple = {
+      principal_arn = aws_iam_role.this["multiple"].arn
+
+      policy_associations = {
+        ex-one = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
+          access_scope = {
+            namespaces = ["default"]
+            type       = "namespace"
+          }
+        }
+        ex-two = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+  }
+
+  tags = local.tags
+}
+
+module "disabled_eks" {
+  source = "../.."
+
+  create = false
+}
+
+################################################################################
+# Sub-Module Usage on Existing/Separate Cluster
+################################################################################
+
+module "eks_managed_node_group" {
+  source = "../../modules/eks-managed-node-group"
+
+  name                 = "separate-eks-mng"
+  cluster_name         = module.eks.cluster_name
+  cluster_ip_family    = module.eks.cluster_ip_family
+  cluster_service_cidr = module.eks.cluster_service_cidr
+
+  subnet_ids                        = module.vpc.private_subnets
+  cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id
+  vpc_security_group_ids            = [module.eks.node_security_group_id]
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  # This will get added to what AWS provides
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+
+    [settings.kubernetes.node-labels]
+    "label1" = "foo"
+    "label2" = "bar"
+  EOT
+
+  tags = merge(local.tags, { Separate = "eks-managed-node-group" })
+}
+
+module "disabled_eks_managed_node_group" {
+  source = "../../modules/eks-managed-node-group"
+
+  create = false
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway     = true
+  single_nat_gateway     = true
+  enable_ipv6            = true
+  create_egress_only_igw = true
+
+  public_subnet_ipv6_prefixes                    = [0, 1, 2]
+  public_subnet_assign_ipv6_address_on_creation  = true
+  private_subnet_ipv6_prefixes                   = [3, 4, 5]
+  private_subnet_assign_ipv6_address_on_creation = true
+  intra_subnet_ipv6_prefixes                     = [6, 7, 8]
+  intra_subnet_assign_ipv6_address_on_creation   = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
+
+module "aws_vpc_cni_ipv6_pod_identity" {
+  source  = "terraform-aws-modules/eks-pod-identity/aws"
+  version = "~> 1.6"
+
+  name = "aws-vpc-cni-ipv6"
+
+  attach_aws_vpc_cni_policy = true
+  aws_vpc_cni_enable_ipv6   = true
+
+  tags = local.tags
+}
+
+module "ebs_kms_key" {
+  source  = "terraform-aws-modules/kms/aws"
+  version = "~> 4.0"
+
+  description = "Customer managed key to encrypt EKS managed node group volumes"
+
+  # Policy
+  key_administrators = [
+    data.aws_caller_identity.current.arn
+  ]
+
+  key_service_roles_for_autoscaling = [
+    # required for the ASG to manage encrypted volumes for nodes
+    "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
+    # required for the cluster / persistentvolume-controller to create encrypted PVCs
+    module.eks.cluster_iam_role_arn,
+  ]
+
+  # Aliases
+  aliases = ["eks/${local.name}/ebs"]
+
+  tags = local.tags
+}
+
+module "key_pair" {
+  source  = "terraform-aws-modules/key-pair/aws"
+  version = "~> 2.0"
+
+  key_name_prefix    = local.name
+  create_private_key = true
+
+  tags = local.tags
+}
+
+resource "aws_security_group" "remote_access" {
+  name_prefix = "${local.name}-remote-access"
+  description = "Allow remote SSH access"
+  vpc_id      = module.vpc.vpc_id
+
+  ingress {
+    description = "SSH access"
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["10.0.0.0/8"]
+  }
+
+  egress {
+    from_port        = 0
+    to_port          = 0
+    protocol         = "-1"
+    cidr_blocks      = ["0.0.0.0/0"]
+    ipv6_cidr_blocks = ["::/0"]
+  }
+
+  tags = merge(local.tags, { Name = "${local.name}-remote" })
+}
+
+resource "aws_iam_policy" "node_additional" {
+  name        = "${local.name}-additional"
+  description = "Example usage of node additional policy"
+
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "ec2:Describe*",
+        ]
+        Effect   = "Allow"
+        Resource = "*"
+      },
+    ]
+  })
+
+  tags = local.tags
+}
+
+data "aws_ami" "eks_default" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_version}-v*"]
+  }
+}
+
+data "aws_ami" "eks_default_arm" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["amazon-eks-node-al2023-arm64-standard-${local.kubernetes_version}-v*"]
+  }
+}
+
+data "aws_ami" "eks_default_bottlerocket" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["bottlerocket-aws-k8s-${local.kubernetes_version}-x86_64-*"]
+  }
+}
+
+resource "aws_iam_role" "this" {
+  for_each = toset(["single", "multiple"])
+
+  name = "ex-${each.key}"
+
+  # Simple trust policy, used only for this example
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Sid    = "Example"
+        Principal = {
+          Service = "ec2.amazonaws.com"
+        }
+      },
+    ]
+  })
+
+  tags = local.tags
+}
diff --git a/tests/eks-managed-node-group/outputs.tf b/tests/eks-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..9ed8c27220
--- /dev/null
+++ b/tests/eks-managed-node-group/outputs.tf
@@ -0,0 +1,245 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = module.eks.cluster_id
+}
+
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = module.eks.cluster_name
+}
+
+output "cluster_oidc_issuer_url" {
+  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_dualstack_oidc_issuer_url" {
+  description = "Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_dualstack_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+  description = "Platform version for the cluster"
+  value       = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+  value       = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+  value       = module.eks.cluster_primary_security_group_id
+}
+
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = module.eks.cluster_service_cidr
+}
+
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = module.eks.cluster_ip_family
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = module.eks.access_entries
+}
+
+################################################################################
+# KMS Key
+################################################################################
+
+output "kms_key_arn" {
+  description = "The Amazon Resource Name (ARN) of the key"
+  value       = module.eks.kms_key_arn
+}
+
+output "kms_key_id" {
+  description = "The globally unique identifier for the key"
+  value       = module.eks.kms_key_id
+}
+
+output "kms_key_policy" {
+  description = "The IAM resource policy set on the key"
+  value       = module.eks.kms_key_policy
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the cluster security group"
+  value       = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+  description = "ID of the cluster security group"
+  value       = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the node shared security group"
+  value       = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+  description = "ID of the node shared security group"
+  value       = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider" {
+  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
+  value       = module.eks.oidc_provider
+}
+
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+  value       = module.eks.oidc_provider_arn
+}
+
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = module.eks.cluster_tls_certificate_sha1_fingerprint
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+  description = "Cluster IAM role name"
+  value       = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+  description = "Cluster IAM role ARN"
+  value       = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "EKS Auto node IAM role name"
+  value       = module.eks.node_iam_role_name
+}
+
+output "node_iam_role_arn" {
+  description = "EKS Auto node IAM role ARN"
+  value       = module.eks.node_iam_role_arn
+}
+
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.node_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+  description = "Map of attribute maps for all EKS cluster addons enabled"
+  value       = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+  description = "Map of attribute maps for all EKS identity providers enabled"
+  value       = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+  description = "Name of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+  description = "Arn of cloudwatch log group created"
+  value       = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+  description = "Map of attribute maps for all EKS Fargate Profiles created"
+  value       = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+  description = "Map of attribute maps for all EKS managed node groups created"
+  value       = module.eks.eks_managed_node_groups
+}
+
+output "eks_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by EKS managed node groups"
+  value       = module.eks.eks_managed_node_groups_autoscaling_group_names
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+  description = "Map of attribute maps for all self managed node groups created"
+  value       = module.eks.self_managed_node_groups
+}
+
+output "self_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by self-managed node groups"
+  value       = module.eks.self_managed_node_groups_autoscaling_group_names
+}
diff --git a/tests/eks-managed-node-group/variables.tf b/tests/eks-managed-node-group/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/eks-managed-node-group/versions.tf b/tests/eks-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/tests/eks-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/tests/self-managed-node-group/README.md b/tests/self-managed-node-group/README.md
new file mode 100644
index 0000000000..feee069124
--- /dev/null
+++ b/tests/self-managed-node-group/README.md
@@ -0,0 +1,97 @@
+# Self-managed Node Group
+
+## Usage
+
+To provision the provided configurations, you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
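+
+Once you are done testing, everything can be torn down non-interactively from the same working directory with a command along these lines:
+
+```bash
+$ terraform destroy --auto-approve
+```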
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 6.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_aws_vpc_cni_ipv4_pod_identity"></a> [aws\_vpc\_cni\_ipv4\_pod\_identity](#module\_aws\_vpc\_cni\_ipv4\_pod\_identity) | terraform-aws-modules/eks-pod-identity/aws | ~> 1.6 |
+| <a name="module_disabled_self_managed_node_group"></a> [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
+| <a name="module_ebs_kms_key"></a> [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 4.0 |
+| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
+| <a name="module_key_pair"></a> [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 |
+| <a name="module_kms"></a> [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 4.0 |
+| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 6.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_access_entries"></a> [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes |
+| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
+| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
+| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
+| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
+| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
+| <a name="output_cluster_dualstack_oidc_issuer_url"></a> [cluster\_dualstack\_oidc\_issuer\_url](#output\_cluster\_dualstack\_oidc\_issuer\_url) | Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
+| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | Cluster IAM role ARN |
+| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | Cluster IAM role name |
+| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
+| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
+| <a name="output_cluster_ip_family"></a> [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) |
+| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
+| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
+| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
+| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
+| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
+| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
+| <a name="output_cluster_service_cidr"></a> [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from |
+| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
+| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
+| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
+| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
+| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
+| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
+| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
+| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
+| <a name="output_node_iam_role_arn"></a> [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | EKS Auto node IAM role ARN |
+| <a name="output_node_iam_role_name"></a> [node\_iam\_role\_name](#output\_node\_iam\_role\_name) | EKS Auto node IAM role name |
+| <a name="output_node_iam_role_unique_id"></a> [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
+| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
+| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
+| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
+| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
+| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
+| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
+<!-- END_TF_DOCS -->
diff --git a/tests/self-managed-node-group/main.tf b/tests/self-managed-node-group/main.tf
new file mode 100644
index 0000000000..9fc3fdc6b1
--- /dev/null
+++ b/tests/self-managed-node-group/main.tf
@@ -0,0 +1,504 @@
+provider "aws" {
+  region = local.region
+}
+
+data "aws_caller_identity" "current" {}
+
+data "aws_availability_zones" "available" {
+  # Exclude local zones
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+locals {
+  name               = "ex-${replace(basename(path.cwd), "_", "-")}"
+  kubernetes_version = "1.33"
+  region             = "eu-west-1"
+
+  vpc_cidr = "10.0.0.0/16"
+  azs      = slice(data.aws_availability_zones.available.names, 0, 3)
+
+  tags = {
+    Test       = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
+}
+
+################################################################################
+# EKS Module
+################################################################################
+
+module "eks" {
+  source = "../.."
+
+  name                   = local.name
+  kubernetes_version     = local.kubernetes_version
+  endpoint_public_access = true
+
+  enable_cluster_creator_admin_permissions = true
+
+  addons = {
+    coredns = {
+      most_recent = true
+    }
+    eks-pod-identity-agent = {
+      before_compute = true
+      most_recent    = true
+    }
+    kube-proxy = {
+      most_recent = true
+    }
+    vpc-cni = {
+      before_compute = true
+      most_recent    = true
+      pod_identity_association = [{
+        role_arn        = module.aws_vpc_cni_ipv4_pod_identity.iam_role_arn
+        service_account = "aws-node"
+      }]
+    }
+  }
+
+  vpc_id                   = module.vpc.vpc_id
+  subnet_ids               = module.vpc.private_subnets
+  control_plane_subnet_ids = module.vpc.intra_subnets
+
+  # External encryption key
+  create_kms_key = false
+  encryption_config = {
+    resources        = ["secrets"]
+    provider_key_arn = module.kms.key_arn
+  }
+
+  self_managed_node_groups = {
+    # Default node group - as provisioned by the module defaults
+    default_node_group = {
+      ami_type = "AL2023_x86_64_STANDARD"
+      ami_id   = data.aws_ami.eks_default.image_id
+
+      # enable discovery of autoscaling groups by cluster-autoscaler
+      autoscaling_group_tags = {
+        "k8s.io/cluster-autoscaler/enabled" : true,
+        "k8s.io/cluster-autoscaler/${local.name}" : "owned",
+      }
+    }
+
+    # Bottlerocket node group
+    bottlerocket = {
+      name = "bottlerocket-self-mng"
+
+      ami_type      = "BOTTLEROCKET_x86_64"
+      ami_id        = data.aws_ami.eks_default_bottlerocket.id
+      instance_type = "m5.large"
+      desired_size  = 2
+      key_name      = module.key_pair.key_pair_name
+
+      bootstrap_extra_args = <<-EOT
+        # The admin host container provides SSH access and runs with "superpowers".
+        # It is disabled by default, and is explicitly disabled here.
+        [settings.host-containers.admin]
+        enabled = false
+
+        # The control host container provides out-of-band access via SSM.
+        # It is enabled by default, and can be disabled if you do not expect to use SSM.
+        # This could leave you with no way to access the API and change settings on an existing node!
+        [settings.host-containers.control]
+        enabled = true
+
+        # extra args added
+        [settings.kernel]
+        lockdown = "integrity"
+
+        [settings.kubernetes.node-labels]
+        label1 = "foo"
+        label2 = "bar"
+
+        [settings.kubernetes.node-taints]
+        dedicated = "experimental:PreferNoSchedule"
+        special = "true:NoSchedule"
+      EOT
+    }
+
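+    # Mixed instances policy node group - blends On-Demand and Spot capacity across instance types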
+    mixed = {
+      name = "mixed"
+
+      min_size     = 1
+      max_size     = 5
+      desired_size = 2
+
+      cloudinit_pre_nodeadm = [{
+        content      = <<-EOT
+          ---
+          apiVersion: node.eks.aws/v1alpha1
+          kind: NodeConfig
+          spec:
+            kubelet:
+              flags:
+                - --node-labels=node.kubernetes.io/lifecycle=spot
+        EOT
+        content_type = "application/node.eks.aws"
+      }]
+
+      use_mixed_instances_policy = true
+      mixed_instances_policy = {
+        instances_distribution = {
+          on_demand_base_capacity                  = 0
+          on_demand_percentage_above_base_capacity = 20
+          spot_allocation_strategy                 = "capacity-optimized"
+        }
+
+        launch_template = {
+          override = [
+            {
+              instance_type     = "m5.large"
+              weighted_capacity = "1"
+            },
+            {
+              instance_type     = "m6i.large"
+              weighted_capacity = "2"
+            },
+          ]
+        }
+      }
+    }
+
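+    # Attribute-based instance type selection via instance requirements (no explicit instance type)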
+    instance_attributes = {
+      name = "instance-attributes"
+
+      min_size     = 1
+      max_size     = 2
+      desired_size = 1
+
+      cloudinit_pre_nodeadm = [{
+        content      = <<-EOT
+          ---
+          apiVersion: node.eks.aws/v1alpha1
+          kind: NodeConfig
+          spec:
+            kubelet:
+              config:
+                shutdownGracePeriod: 30s
+        EOT
+        content_type = "application/node.eks.aws"
+      }]
+
+      instance_type = null
+
+      # launch template configuration
+      instance_requirements = {
+        cpu_manufacturers                           = ["intel"]
+        instance_generations                        = ["current", "previous"]
+        spot_max_price_percentage_over_lowest_price = 100
+
+        memory_mib = {
+          min = 8192
+        }
+
+        vcpu_count = {
+          min = 1
+        }
+
+        allowed_instance_types = ["t*", "m*"]
+      }
+
+      use_mixed_instances_policy = true
+      mixed_instances_policy = {
+        instances_distribution = {
+          on_demand_base_capacity                  = 0
+          on_demand_percentage_above_base_capacity = 0
+          on_demand_allocation_strategy            = "lowest-price"
+          spot_allocation_strategy                 = "price-capacity-optimized"
+        }
+
+        # ASG configuration
+        launch_template = {
+          override = [
+            {
+              instance_requirements = {
+                cpu_manufacturers                           = ["intel"]
+                instance_generations                        = ["current", "previous"]
+                spot_max_price_percentage_over_lowest_price = 100
+
+                memory_mib = {
+                  min = 8192
+                }
+
+                vcpu_count = {
+                  min = 1
+                }
+
+                allowed_instance_types = ["t*", "m*"]
+              }
+            }
+          ]
+        }
+      }
+    }
+
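+    # Complete example with launch template, IAM role, and block device customizations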
+    complete = {
+      name            = "complete-self-mng"
+      use_name_prefix = false
+
+      subnet_ids = module.vpc.public_subnets
+
+      min_size     = 1
+      max_size     = 7
+      desired_size = 1
+
+      cloudinit_pre_nodeadm = [{
+        content      = <<-EOT
+          ---
+          apiVersion: node.eks.aws/v1alpha1
+          kind: NodeConfig
+          spec:
+            kubelet:
+              flags:
+                - --node-labels=node.kubernetes.io/lifecycle=spot
+        EOT
+        content_type = "application/node.eks.aws"
+      }]
+
+      instance_type = "m6i.large"
+
+      launch_template_name            = "self-managed-ex"
+      launch_template_use_name_prefix = true
+      launch_template_description     = "Self managed node group example launch template"
+
+      ebs_optimized     = true
+      enable_monitoring = true
+
+      block_device_mappings = {
+        xvda = {
+          device_name = "/dev/xvda"
+          ebs = {
+            volume_size           = 75
+            volume_type           = "gp3"
+            iops                  = 3000
+            throughput            = 150
+            encrypted             = true
+            kms_key_id            = module.ebs_kms_key.key_arn
+            delete_on_termination = true
+          }
+        }
+      }
+
+      metadata_options = {
+        http_endpoint               = "enabled"
+        http_tokens                 = "required"
+        http_put_response_hop_limit = 1
+        instance_metadata_tags      = "disabled"
+      }
+
+      create_iam_role          = true
+      iam_role_name            = "self-managed-node-group-complete-example"
+      iam_role_use_name_prefix = false
+      iam_role_description     = "Self managed node group complete example role"
+      iam_role_tags = {
+        Purpose = "Protector of the kubelet"
+      }
+      iam_role_additional_policies = {
+        AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+        additional                         = aws_iam_policy.additional.arn
+      }
+
+      tags = {
+        ExtraTag = "Self managed node group complete example"
+      }
+    }
+
+    efa = {
+      # Disabling automatic creation due to instance type/quota availability
+      # Can be enabled when appropriate for testing/validation
+      create = false
+
+      # The EKS AL2023 NVIDIA AMI provides all of the necessary components
+      # for accelerated workloads w/ EFA
+      ami_type       = "AL2023_x86_64_NVIDIA"
+      instance_types = ["p5e.48xlarge"]
+
+      # Mount instance store volumes in RAID-0 for kubelet and containerd
+      # https://github.com/awslabs/amazon-eks-ami/blob/master/doc/USER_GUIDE.md#raid-0-for-kubelet-and-containerd-raid0
+      cloudinit_pre_nodeadm = [
+        {
+          content_type = "application/node.eks.aws"
+          content      = <<-EOT
+            ---
+            apiVersion: node.eks.aws/v1alpha1
+            kind: NodeConfig
+            spec:
+              instance:
+                localStorage:
+                  strategy: RAID0
+          EOT
+        }
+      ]
+
+      # This will:
+      # 1. Create a placement group to place the instances close to one another
+      # 2. Create and attach the necessary security group rules (and security group)
+      # 3. Expose all of the available EFA interfaces on the launch template
+      enable_efa_support = true
+      enable_efa_only    = true
+      efa_indices        = [0, 4, 8, 12]
+
+      min_size     = 2
+      max_size     = 2
+      desired_size = 2
+
+      labels = {
+        "vpc.amazonaws.com/efa.present" = "true"
+        "nvidia.com/gpu.present"        = "true"
+      }
+
+      taints = {
+        # Ensure only GPU workloads are scheduled on this node group
+        gpu = {
+          key    = "nvidia.com/gpu"
+          value  = "true"
+          effect = "NO_SCHEDULE"
+        }
+      }
+    }
+  }
+
+  tags = local.tags
+}
+
+module "disabled_self_managed_node_group" {
+  source = "../../modules/self-managed-node-group"
+
+  create = false
+
+  # Hard requirement
+  cluster_service_cidr = ""
+}
+
+################################################################################
+# Supporting Resources
+################################################################################
+
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 6.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+  enable_nat_gateway = true
+  single_nat_gateway = true
+
+  public_subnet_tags = {
+    "kubernetes.io/role/elb" = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/role/internal-elb" = 1
+  }
+
+  tags = local.tags
+}
+
+module "aws_vpc_cni_ipv4_pod_identity" {
+  source  = "terraform-aws-modules/eks-pod-identity/aws"
+  version = "~> 1.6"
+
+  name = "aws-vpc-cni-ipv4"
+
+  attach_aws_vpc_cni_policy = true
+  aws_vpc_cni_enable_ipv4   = true
+
+  tags = local.tags
+}
+
+data "aws_ami" "eks_default" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["amazon-eks-node-al2023-x86_64-standard-${local.kubernetes_version}-v*"]
+  }
+}
+
+data "aws_ami" "eks_default_bottlerocket" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["bottlerocket-aws-k8s-${local.kubernetes_version}-x86_64-*"]
+  }
+}
+
+module "key_pair" {
+  source  = "terraform-aws-modules/key-pair/aws"
+  version = "~> 2.0"
+
+  key_name_prefix    = local.name
+  create_private_key = true
+
+  tags = local.tags
+}
+
+module "ebs_kms_key" {
+  source  = "terraform-aws-modules/kms/aws"
+  version = "~> 4.0"
+
+  description = "Customer managed key to encrypt EKS managed node group volumes"
+
+  # Policy
+  key_administrators = [
+    data.aws_caller_identity.current.arn
+  ]
+
+  key_service_roles_for_autoscaling = [
+    # required for the ASG to manage encrypted volumes for nodes
+    "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
+    # required for the cluster / persistentvolume-controller to create encrypted PVCs
+    module.eks.cluster_iam_role_arn,
+  ]
+
+  # Aliases
+  aliases = ["eks/${local.name}/ebs"]
+
+  tags = local.tags
+}
+
+module "kms" {
+  source  = "terraform-aws-modules/kms/aws"
+  version = "~> 4.0"
+
+  aliases               = ["eks/${local.name}"]
+  description           = "${local.name} cluster encryption key"
+  enable_default_policy = true
+  key_owners            = [data.aws_caller_identity.current.arn]
+
+  tags = local.tags
+}
+
+resource "aws_iam_policy" "additional" {
+  name        = "${local.name}-additional"
+  description = "Example usage of node additional policy"
+
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "ec2:Describe*",
+        ]
+        Effect   = "Allow"
+        Resource = "*"
+      },
+    ]
+  })
+
+  tags = local.tags
+}
diff --git a/tests/self-managed-node-group/outputs.tf b/tests/self-managed-node-group/outputs.tf
new file mode 100644
index 0000000000..9ed8c27220
--- /dev/null
+++ b/tests/self-managed-node-group/outputs.tf
@@ -0,0 +1,245 @@
+################################################################################
+# Cluster
+################################################################################
+
+output "cluster_arn" {
+  description = "The Amazon Resource Name (ARN) of the cluster"
+  value       = module.eks.cluster_arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Base64 encoded certificate data required to communicate with the cluster"
+  value       = module.eks.cluster_certificate_authority_data
+}
+
+output "cluster_endpoint" {
+  description = "Endpoint for your Kubernetes API server"
+  value       = module.eks.cluster_endpoint
+}
+
+output "cluster_id" {
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = module.eks.cluster_id
+}
+
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = module.eks.cluster_name
+}
+
+output "cluster_oidc_issuer_url" {
+  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_oidc_issuer_url
+}
+
+output "cluster_dualstack_oidc_issuer_url" {
+  description = "Dual-stack compatible URL on the EKS cluster for the OpenID Connect identity provider"
+  value       = module.eks.cluster_dualstack_oidc_issuer_url
+}
+
+output "cluster_platform_version" {
+  description = "Platform version for the cluster"
+  value       = module.eks.cluster_platform_version
+}
+
+output "cluster_status" {
+  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
+  value       = module.eks.cluster_status
+}
+
+output "cluster_primary_security_group_id" {
+  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
+  value       = module.eks.cluster_primary_security_group_id
+}
+
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = module.eks.cluster_service_cidr
+}
+
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = module.eks.cluster_ip_family
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = module.eks.access_entries
+}
+
+################################################################################
+# KMS Key
+################################################################################
+
+output "kms_key_arn" {
+  description = "The Amazon Resource Name (ARN) of the key"
+  value       = module.eks.kms_key_arn
+}
+
+output "kms_key_id" {
+  description = "The globally unique identifier for the key"
+  value       = module.eks.kms_key_id
+}
+
+output "kms_key_policy" {
+  description = "The IAM resource policy set on the key"
+  value       = module.eks.kms_key_policy
+}
+
+################################################################################
+# Security Group
+################################################################################
+
+output "cluster_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the cluster security group"
+  value       = module.eks.cluster_security_group_arn
+}
+
+output "cluster_security_group_id" {
+  description = "ID of the cluster security group"
+  value       = module.eks.cluster_security_group_id
+}
+
+################################################################################
+# Node Security Group
+################################################################################
+
+output "node_security_group_arn" {
+  description = "Amazon Resource Name (ARN) of the node shared security group"
+  value       = module.eks.node_security_group_arn
+}
+
+output "node_security_group_id" {
+  description = "ID of the node shared security group"
+  value       = module.eks.node_security_group_id
+}
+
+################################################################################
+# IRSA
+################################################################################
+
+output "oidc_provider" {
+  description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
+  value       = module.eks.oidc_provider
+}
+
+output "oidc_provider_arn" {
+  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
+  value       = module.eks.oidc_provider_arn
+}
+
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = module.eks.cluster_tls_certificate_sha1_fingerprint
+}
+
+################################################################################
+# IAM Role
+################################################################################
+
+output "cluster_iam_role_name" {
+  description = "Cluster IAM role name"
+  value       = module.eks.cluster_iam_role_name
+}
+
+output "cluster_iam_role_arn" {
+  description = "Cluster IAM role ARN"
+  value       = module.eks.cluster_iam_role_arn
+}
+
+output "cluster_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.cluster_iam_role_unique_id
+}
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+output "node_iam_role_name" {
+  description = "EKS Auto node IAM role name"
+  value       = module.eks.node_iam_role_name
+}
+
+output "node_iam_role_arn" {
+  description = "EKS Auto node IAM role ARN"
+  value       = module.eks.node_iam_role_arn
+}
+
+output "node_iam_role_unique_id" {
+  description = "Stable and unique string identifying the IAM role"
+  value       = module.eks.node_iam_role_unique_id
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+output "cluster_addons" {
+  description = "Map of attribute maps for all EKS cluster addons enabled"
+  value       = module.eks.cluster_addons
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+output "cluster_identity_providers" {
+  description = "Map of attribute maps for all EKS identity providers enabled"
+  value       = module.eks.cluster_identity_providers
+}
+
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+output "cloudwatch_log_group_name" {
+  description = "Name of CloudWatch log group created"
+  value       = module.eks.cloudwatch_log_group_name
+}
+
+output "cloudwatch_log_group_arn" {
+  description = "ARN of CloudWatch log group created"
+  value       = module.eks.cloudwatch_log_group_arn
+}
+
+################################################################################
+# Fargate Profile
+################################################################################
+
+output "fargate_profiles" {
+  description = "Map of attribute maps for all EKS Fargate Profiles created"
+  value       = module.eks.fargate_profiles
+}
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+output "eks_managed_node_groups" {
+  description = "Map of attribute maps for all EKS managed node groups created"
+  value       = module.eks.eks_managed_node_groups
+}
+
+output "eks_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by EKS managed node groups"
+  value       = module.eks.eks_managed_node_groups_autoscaling_group_names
+}
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+output "self_managed_node_groups" {
+  description = "Map of attribute maps for all self-managed node groups created"
+  value       = module.eks.self_managed_node_groups
+}
+
+output "self_managed_node_groups_autoscaling_group_names" {
+  description = "List of the autoscaling group names created by self-managed node groups"
+  value       = module.eks.self_managed_node_groups_autoscaling_group_names
+}
diff --git a/tests/self-managed-node-group/variables.tf b/tests/self-managed-node-group/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/self-managed-node-group/versions.tf b/tests/self-managed-node-group/versions.tf
new file mode 100644
index 0000000000..db13b0a8d2
--- /dev/null
+++ b/tests/self-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+  }
+}
diff --git a/tests/user-data/README.md b/tests/user-data/README.md
new file mode 100644
index 0000000000..684a3a7648
--- /dev/null
+++ b/tests/user-data/README.md
@@ -0,0 +1,107 @@
+# Internal User Data Module
+
+The configurations in this directory render various user data outputs used for testing and validating the internal `_user_data` sub-module.
+
+## Usage
+
+To provision the provided configurations, you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
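+The rendered user data is written to the `rendered/` directory (see `outputs.tf`), so changes to the `_user_data` sub-module show up as plain file diffs. As a minimal sketch (assuming you run from this directory and the rendered files are committed), you can review what changed with:
+
+```bash
+$ terraform apply --auto-approve
+$ git diff --stat rendered/
+$ git diff rendered/al2023/eks-mng-custom-ami.txt
+```
+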
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.7 |
+| <a name="requirement_local"></a> [local](#requirement\_local) | >= 2.4 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_local"></a> [local](#provider\_local) | >= 2.4 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_eks_mng_al2023_additional"></a> [eks\_mng\_al2023\_additional](#module\_eks\_mng\_al2023\_additional) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2023_custom_ami"></a> [eks\_mng\_al2023\_custom\_ami](#module\_eks\_mng\_al2023\_custom\_ami) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2023_custom_template"></a> [eks\_mng\_al2023\_custom\_template](#module\_eks\_mng\_al2023\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2023_no_op"></a> [eks\_mng\_al2023\_no\_op](#module\_eks\_mng\_al2023\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_additional"></a> [eks\_mng\_al2\_additional](#module\_eks\_mng\_al2\_additional) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_custom_ami"></a> [eks\_mng\_al2\_custom\_ami](#module\_eks\_mng\_al2\_custom\_ami) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_custom_ami_ipv6"></a> [eks\_mng\_al2\_custom\_ami\_ipv6](#module\_eks\_mng\_al2\_custom\_ami\_ipv6) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_custom_template"></a> [eks\_mng\_al2\_custom\_template](#module\_eks\_mng\_al2\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_disabled"></a> [eks\_mng\_al2\_disabled](#module\_eks\_mng\_al2\_disabled) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_al2_no_op"></a> [eks\_mng\_al2\_no\_op](#module\_eks\_mng\_al2\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_bottlerocket_additional"></a> [eks\_mng\_bottlerocket\_additional](#module\_eks\_mng\_bottlerocket\_additional) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_bottlerocket_custom_ami"></a> [eks\_mng\_bottlerocket\_custom\_ami](#module\_eks\_mng\_bottlerocket\_custom\_ami) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_bottlerocket_custom_template"></a> [eks\_mng\_bottlerocket\_custom\_template](#module\_eks\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_bottlerocket_no_op"></a> [eks\_mng\_bottlerocket\_no\_op](#module\_eks\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_windows_additional"></a> [eks\_mng\_windows\_additional](#module\_eks\_mng\_windows\_additional) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_windows_custom_ami"></a> [eks\_mng\_windows\_custom\_ami](#module\_eks\_mng\_windows\_custom\_ami) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_windows_custom_template"></a> [eks\_mng\_windows\_custom\_template](#module\_eks\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_eks_mng_windows_no_op"></a> [eks\_mng\_windows\_no\_op](#module\_eks\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2023_bootstrap"></a> [self\_mng\_al2023\_bootstrap](#module\_self\_mng\_al2023\_bootstrap) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2023_custom_template"></a> [self\_mng\_al2023\_custom\_template](#module\_self\_mng\_al2023\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2023_no_op"></a> [self\_mng\_al2023\_no\_op](#module\_self\_mng\_al2023\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2_bootstrap"></a> [self\_mng\_al2\_bootstrap](#module\_self\_mng\_al2\_bootstrap) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2_bootstrap_ipv6"></a> [self\_mng\_al2\_bootstrap\_ipv6](#module\_self\_mng\_al2\_bootstrap\_ipv6) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2_custom_template"></a> [self\_mng\_al2\_custom\_template](#module\_self\_mng\_al2\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_al2_no_op"></a> [self\_mng\_al2\_no\_op](#module\_self\_mng\_al2\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_bottlerocket_bootstrap"></a> [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_bottlerocket_custom_template"></a> [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_bottlerocket_no_op"></a> [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_windows_bootstrap"></a> [self\_mng\_windows\_bootstrap](#module\_self\_mng\_windows\_bootstrap) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_windows_custom_template"></a> [self\_mng\_windows\_custom\_template](#module\_self\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a |
+| <a name="module_self_mng_windows_no_op"></a> [self\_mng\_windows\_no\_op](#module\_self\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [local_file.eks_mng_al2023_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2023_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2023_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2023_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2_custom_ami_ipv6](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_al2_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_bottlerocket_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_bottlerocket_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_bottlerocket_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_bottlerocket_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_windows_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_windows_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_windows_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.eks_mng_windows_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2023_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2023_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2023_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2_bootstrap_ipv6](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_al2_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_bottlerocket_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_bottlerocket_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_bottlerocket_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_windows_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_windows_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.self_mng_windows_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+No outputs.
+<!-- END_TF_DOCS -->
diff --git a/tests/user-data/main.tf b/tests/user-data/main.tf
new file mode 100644
index 0000000000..835d9fc347
--- /dev/null
+++ b/tests/user-data/main.tf
@@ -0,0 +1,672 @@
+locals {
+  name = "ex-${replace(basename(path.cwd), "_", "-")}"
+
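+  # Placeholder cluster values used only to render user data for these tests; no real cluster is created or contacted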
+  cluster_endpoint          = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+  cluster_auth_base64       = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+  cluster_service_ipv4_cidr = "172.16.0.0/16"
+  cluster_service_ipv6_cidr = "fdd3:7636:68bc::/108"
+  cluster_service_cidr      = "192.168.0.0/16"
+}
+
+################################################################################
+# EKS managed node group - AL2
+################################################################################
+
+module "eks_mng_al2_disabled" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2_x86_64"
+  create   = false
+}
+
+module "eks_mng_al2_no_op" {
+  source = "../../modules/_user_data"
+
+  # Hard requirement
+  ami_type             = "AL2_x86_64"
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "eks_mng_al2_additional" {
+  source = "../../modules/_user_data"
+
+  # Hard requirement
+  ami_type             = "AL2_x86_64"
+  cluster_service_cidr = local.cluster_service_cidr
+
+  pre_bootstrap_user_data = <<-EOT
+    export USE_MAX_PODS=false
+  EOT
+}
+
+module "eks_mng_al2_custom_ami" {
+  source = "../../modules/_user_data"
+
+  ami_type             = "AL2_x86_64"
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_ipv4_cidr
+
+  enable_bootstrap_user_data = true
+
+  pre_bootstrap_user_data = <<-EOT
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--instance-type t3a.large'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+module "eks_mng_al2_custom_ami_ipv6" {
+  source = "../../modules/_user_data"
+
+  ami_type             = "AL2_x86_64"
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_ip_family    = "ipv6"
+  cluster_service_cidr = local.cluster_service_ipv6_cidr
+
+  enable_bootstrap_user_data = true
+
+  pre_bootstrap_user_data = <<-EOT
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--instance-type t3a.large'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+module "eks_mng_al2_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type             = "AL2_x86_64"
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_ipv4_cidr
+
+  user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+  pre_bootstrap_user_data = <<-EOT
+    echo "foo"
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+################################################################################
+# EKS managed node group - AL2023
+################################################################################
+
+module "eks_mng_al2023_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "eks_mng_al2023_additional" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  cloudinit_pre_nodeadm = [{
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+            featureGates:
+              DisableKubeletCloudCredentialProviders: true
+    EOT
+    content_type = "application/node.eks.aws"
+  }]
+}
+
+module "eks_mng_al2023_custom_ami" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_cidr
+
+  enable_bootstrap_user_data = true
+
+  cloudinit_pre_nodeadm = [{
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+            featureGates:
+              DisableKubeletCloudCredentialProviders: true
+    EOT
+    content_type = "application/node.eks.aws"
+  }]
+
+  cloudinit_post_nodeadm = [{
+    content      = <<-EOT
+      echo "All done"
+    EOT
+    content_type = "text/x-shellscript; charset=\"us-ascii\""
+  }]
+}
+
+module "eks_mng_al2023_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_cidr
+
+  enable_bootstrap_user_data = true
+  user_data_template_path    = "${path.module}/templates/al2023_custom.tpl"
+
+  cloudinit_pre_nodeadm = [{
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+            featureGates:
+              DisableKubeletCloudCredentialProviders: true
+    EOT
+    content_type = "application/node.eks.aws"
+  }]
+
+  cloudinit_post_nodeadm = [{
+    content      = <<-EOT
+      echo "All done"
+    EOT
+    content_type = "text/x-shellscript; charset=\"us-ascii\""
+  }]
+}
+
+################################################################################
+# EKS managed node group - Bottlerocket
+################################################################################
+
+module "eks_mng_bottlerocket_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "eks_mng_bottlerocket_additional" {
+  source = "../../modules/_user_data"
+
+  ami_type             = "BOTTLEROCKET_x86_64"
+  cluster_service_cidr = local.cluster_service_cidr
+
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+
+module "eks_mng_bottlerocket_custom_ami" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_cidr
+  additional_cluster_dns_ips = [
+    "169.254.20.10"
+  ]
+
+  enable_bootstrap_user_data = true
+
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+
+module "eks_mng_bottlerocket_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+
+################################################################################
+# EKS managed node group - Windows
+################################################################################
+
+module "eks_mng_windows_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "eks_mng_windows_additional" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  pre_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+}
+
+module "eks_mng_windows_custom_ami" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  enable_bootstrap_user_data = true
+
+  pre_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+  bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+  post_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+}
+
+module "eks_mng_windows_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  enable_bootstrap_user_data = true
+
+  user_data_template_path = "${path.module}/templates/windows_custom.tpl"
+
+  pre_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+  bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+  post_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+}
+
+################################################################################
+# Self-managed node group - AL2
+################################################################################
+
+module "self_mng_al2_no_op" {
+  source = "../../modules/_user_data"
+
+  is_eks_managed_node_group = false
+
+  # Hard requirement
+  ami_type             = "AL2_x86_64"
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "self_mng_al2_bootstrap" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_ipv4_cidr
+
+  pre_bootstrap_user_data = <<-EOT
+    echo "foo"
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+module "self_mng_al2_bootstrap_ipv6" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_ip_family    = "ipv6"
+  cluster_service_cidr = local.cluster_service_ipv6_cidr
+
+  pre_bootstrap_user_data = <<-EOT
+    echo "foo"
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+module "self_mng_al2_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_ipv4_cidr
+
+  user_data_template_path = "${path.module}/templates/linux_custom.tpl"
+
+  pre_bootstrap_user_data = <<-EOT
+    echo "foo"
+    export FOO=bar
+  EOT
+
+  bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+  post_bootstrap_user_data = <<-EOT
+    echo "All done"
+  EOT
+}
+
+################################################################################
+# Self-managed node group - AL2023
+################################################################################
+
+module "self_mng_al2023_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  is_eks_managed_node_group = false
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "self_mng_al2023_bootstrap" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_cidr
+
+  cloudinit_pre_nodeadm = [{
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+            featureGates:
+              DisableKubeletCloudCredentialProviders: true
+    EOT
+    content_type = "application/node.eks.aws"
+  }]
+
+  cloudinit_post_nodeadm = [{
+    content      = <<-EOT
+      echo "All done"
+    EOT
+    content_type = "text/x-shellscript; charset=\"us-ascii\""
+  }]
+}
+
+module "self_mng_al2023_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "AL2023_x86_64_STANDARD"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name         = local.name
+  cluster_endpoint     = local.cluster_endpoint
+  cluster_auth_base64  = local.cluster_auth_base64
+  cluster_service_cidr = local.cluster_service_cidr
+
+  user_data_template_path = "${path.module}/templates/al2023_custom.tpl"
+
+  cloudinit_pre_nodeadm = [{
+    content      = <<-EOT
+      ---
+      apiVersion: node.eks.aws/v1alpha1
+      kind: NodeConfig
+      spec:
+        kubelet:
+          config:
+            shutdownGracePeriod: 30s
+            featureGates:
+              DisableKubeletCloudCredentialProviders: true
+    EOT
+    content_type = "application/node.eks.aws"
+  }]
+
+  cloudinit_post_nodeadm = [{
+    content      = <<-EOT
+      echo "All done"
+    EOT
+    content_type = "text/x-shellscript; charset=\"us-ascii\""
+  }]
+}
+
+################################################################################
+# Self-managed node group - Bottlerocket
+################################################################################
+
+module "self_mng_bottlerocket_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  is_eks_managed_node_group = false
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "self_mng_bottlerocket_bootstrap" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+
+module "self_mng_bottlerocket_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "BOTTLEROCKET_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
+
+  bootstrap_extra_args = <<-EOT
+    # extra args added
+    [settings.kernel]
+    lockdown = "integrity"
+  EOT
+}
+
+################################################################################
+# Self-managed node group - Windows
+################################################################################
+
+module "self_mng_windows_no_op" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  is_eks_managed_node_group = false
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+}
+
+module "self_mng_windows_bootstrap" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  pre_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+  bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+  post_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+}
+
+module "self_mng_windows_custom_template" {
+  source = "../../modules/_user_data"
+
+  ami_type = "WINDOWS_CORE_2022_x86_64"
+
+  enable_bootstrap_user_data = true
+  is_eks_managed_node_group  = false
+
+  cluster_name        = local.name
+  cluster_endpoint    = local.cluster_endpoint
+  cluster_auth_base64 = local.cluster_auth_base64
+
+  # Hard requirement
+  cluster_service_cidr = local.cluster_service_cidr
+
+  user_data_template_path = "${path.module}/templates/windows_custom.tpl"
+
+  pre_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+  # I don't know if this is the right way on Windows, but it's just a string check here anyway
+  bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
+
+  post_bootstrap_user_data = <<-EOT
+    [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+  EOT
+}
diff --git a/tests/user-data/outputs.tf b/tests/user-data/outputs.tf
new file mode 100644
index 0000000000..c40f6632d0
--- /dev/null
+++ b/tests/user-data/outputs.tf
@@ -0,0 +1,189 @@
+################################################################################
+# We write the rendered user data to local files so that we can better track diffs across changes
+#
+# It's harder to verify changes and diffs when we use the standard `output`
+# route; writing to files makes this easier and better highlights changes
+# to avoid unintended disruptions
+################################################################################
+
+################################################################################
+# EKS managed node group - AL2
+################################################################################
+
+resource "local_file" "eks_mng_al2_no_op" {
+  content  = base64decode(module.eks_mng_al2_no_op.user_data)
+  filename = "${path.module}/rendered/al2/eks-mng-no-op.sh"
+}
+
+resource "local_file" "eks_mng_al2_additional" {
+  content  = base64decode(module.eks_mng_al2_additional.user_data)
+  filename = "${path.module}/rendered/al2/eks-mng-additional.txt"
+}
+
+resource "local_file" "eks_mng_al2_custom_ami" {
+  content  = base64decode(module.eks_mng_al2_custom_ami.user_data)
+  filename = "${path.module}/rendered/al2/eks-mng-custom-ami.sh"
+}
+
+resource "local_file" "eks_mng_al2_custom_ami_ipv6" {
+  content  = base64decode(module.eks_mng_al2_custom_ami_ipv6.user_data)
+  filename = "${path.module}/rendered/al2/eks-mng-custom-ami-ipv6.sh"
+}
+
+resource "local_file" "eks_mng_al2_custom_template" {
+  content  = base64decode(module.eks_mng_al2_custom_template.user_data)
+  filename = "${path.module}/rendered/al2/eks-mng-custom-template.sh"
+}
+
+################################################################################
+# EKS managed node group - AL2023
+################################################################################
+
+resource "local_file" "eks_mng_al2023_no_op" {
+  content  = base64decode(module.eks_mng_al2023_no_op.user_data)
+  filename = "${path.module}/rendered/al2023/eks-mng-no-op.txt"
+}
+
+resource "local_file" "eks_mng_al2023_additional" {
+  content  = base64decode(module.eks_mng_al2023_additional.user_data)
+  filename = "${path.module}/rendered/al2023/eks-mng-additional.txt"
+}
+
+resource "local_file" "eks_mng_al2023_custom_ami" {
+  content  = base64decode(module.eks_mng_al2023_custom_ami.user_data)
+  filename = "${path.module}/rendered/al2023/eks-mng-custom-ami.txt"
+}
+
+resource "local_file" "eks_mng_al2023_custom_template" {
+  content  = base64decode(module.eks_mng_al2023_custom_template.user_data)
+  filename = "${path.module}/rendered/al2023/eks-mng-custom-template.txt"
+}
+
+################################################################################
+# EKS managed node group - Bottlerocket
+################################################################################
+
+resource "local_file" "eks_mng_bottlerocket_no_op" {
+  content  = base64decode(module.eks_mng_bottlerocket_no_op.user_data)
+  filename = "${path.module}/rendered/bottlerocket/eks-mng-no-op.toml"
+}
+
+resource "local_file" "eks_mng_bottlerocket_additional" {
+  content  = base64decode(module.eks_mng_bottlerocket_additional.user_data)
+  filename = "${path.module}/rendered/bottlerocket/eks-mng-additional.toml"
+}
+
+resource "local_file" "eks_mng_bottlerocket_custom_ami" {
+  content  = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data)
+  filename = "${path.module}/rendered/bottlerocket/eks-mng-custom-ami.toml"
+}
+
+resource "local_file" "eks_mng_bottlerocket_custom_template" {
+  content  = base64decode(module.eks_mng_bottlerocket_custom_template.user_data)
+  filename = "${path.module}/rendered/bottlerocket/eks-mng-custom-template.toml"
+}
+
+################################################################################
+# EKS managed node group - Windows
+################################################################################
+
+resource "local_file" "eks_mng_windows_no_op" {
+  content  = base64decode(module.eks_mng_windows_no_op.user_data)
+  filename = "${path.module}/rendered/windows/eks-mng-no-op.ps1"
+}
+
+resource "local_file" "eks_mng_windows_additional" {
+  content  = base64decode(module.eks_mng_windows_additional.user_data)
+  filename = "${path.module}/rendered/windows/eks-mng-additional.ps1"
+}
+
+resource "local_file" "eks_mng_windows_custom_ami" {
+  content  = base64decode(module.eks_mng_windows_custom_ami.user_data)
+  filename = "${path.module}/rendered/windows/eks-mng-custom-ami.ps1"
+}
+
+resource "local_file" "eks_mng_windows_custom_template" {
+  content  = base64decode(module.eks_mng_windows_custom_template.user_data)
+  filename = "${path.module}/rendered/windows/eks-mng-custom-template.ps1"
+}
+
+################################################################################
+# Self-managed node group - AL2
+################################################################################
+
+resource "local_file" "self_mng_al2_no_op" {
+  content  = base64decode(module.self_mng_al2_no_op.user_data)
+  filename = "${path.module}/rendered/al2/self-mng-no-op.sh"
+}
+
+resource "local_file" "self_mng_al2_bootstrap" {
+  content  = base64decode(module.self_mng_al2_bootstrap.user_data)
+  filename = "${path.module}/rendered/al2/self-mng-bootstrap.sh"
+}
+
+resource "local_file" "self_mng_al2_bootstrap_ipv6" {
+  content  = base64decode(module.self_mng_al2_bootstrap_ipv6.user_data)
+  filename = "${path.module}/rendered/al2/self-mng-bootstrap-ipv6.sh"
+}
+
+resource "local_file" "self_mng_al2_custom_template" {
+  content  = base64decode(module.self_mng_al2_custom_template.user_data)
+  filename = "${path.module}/rendered/al2/self-mng-custom-template.sh"
+}
+
+################################################################################
+# Self-managed node group - AL2023
+################################################################################
+
+resource "local_file" "self_mng_al2023_no_op" {
+  content  = base64decode(module.self_mng_al2023_no_op.user_data)
+  filename = "${path.module}/rendered/al2023/self-mng-no-op.txt"
+}
+
+resource "local_file" "self_mng_al2023_bootstrap" {
+  content  = base64decode(module.self_mng_al2023_bootstrap.user_data)
+  filename = "${path.module}/rendered/al2023/self-mng-bootstrap.txt"
+}
+
+resource "local_file" "self_mng_al2023_custom_template" {
+  content  = base64decode(module.self_mng_al2023_custom_template.user_data)
+  filename = "${path.module}/rendered/al2023/self-mng-custom-template.txt"
+}
+
+################################################################################
+# Self-managed node group - Bottlerocket
+################################################################################
+
+resource "local_file" "self_mng_bottlerocket_no_op" {
+  content  = base64decode(module.self_mng_bottlerocket_no_op.user_data)
+  filename = "${path.module}/rendered/bottlerocket/self-mng-no-op.toml"
+}
+
+resource "local_file" "self_mng_bottlerocket_bootstrap" {
+  content  = base64decode(module.self_mng_bottlerocket_bootstrap.user_data)
+  filename = "${path.module}/rendered/bottlerocket/self-mng-bootstrap.toml"
+}
+
+resource "local_file" "self_mng_bottlerocket_custom_template" {
+  content  = base64decode(module.self_mng_bottlerocket_custom_template.user_data)
+  filename = "${path.module}/rendered/bottlerocket/self-mng-custom-template.toml"
+}
+
+################################################################################
+# Self-managed node group - Windows
+################################################################################
+
+resource "local_file" "self_mng_windows_no_op" {
+  content  = base64decode(module.self_mng_windows_no_op.user_data)
+  filename = "${path.module}/rendered/windows/self-mng-no-op.ps1"
+}
+
+resource "local_file" "self_mng_windows_bootstrap" {
+  content  = base64decode(module.self_mng_windows_bootstrap.user_data)
+  filename = "${path.module}/rendered/windows/self-mng-bootstrap.ps1"
+}
+
+resource "local_file" "self_mng_windows_custom_template" {
+  content  = base64decode(module.self_mng_windows_custom_template.user_data)
+  filename = "${path.module}/rendered/windows/self-mng-custom-template.ps1"
+}
diff --git a/tests/user-data/rendered/al2/eks-mng-additional.txt b/tests/user-data/rendered/al2/eks-mng-additional.txt
new file mode 100755
index 0000000000..151f0cba7a
--- /dev/null
+++ b/tests/user-data/rendered/al2/eks-mng-additional.txt
@@ -0,0 +1,11 @@
+Content-Type: multipart/mixed; boundary="//"
+MIME-Version: 1.0
+
+--//
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript
+Mime-Version: 1.0
+
+export USE_MAX_PODS=false
+
+--//--
diff --git a/tests/user-data/rendered/al2/eks-mng-custom-ami-ipv6.sh b/tests/user-data/rendered/al2/eks-mng-custom-ami-ipv6.sh
new file mode 100755
index 0000000000..fceb7e3571
--- /dev/null
+++ b/tests/user-data/rendered/al2/eks-mng-custom-ami-ipv6.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+export FOO=bar
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--instance-type t3a.large' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+  --ip-family ipv6 --service-ipv6-cidr fdd3:7636:68bc::/108
+echo "All done"
diff --git a/tests/user-data/rendered/al2/eks-mng-custom-ami.sh b/tests/user-data/rendered/al2/eks-mng-custom-ami.sh
new file mode 100755
index 0000000000..c7d92a7ce4
--- /dev/null
+++ b/tests/user-data/rendered/al2/eks-mng-custom-ami.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+export FOO=bar
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--instance-type t3a.large' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+  --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16
+echo "All done"
diff --git a/tests/user-data/rendered/al2/eks-mng-custom-template.sh b/tests/user-data/rendered/al2/eks-mng-custom-template.sh
new file mode 100755
index 0000000000..e18460fa1d
--- /dev/null
+++ b/tests/user-data/rendered/al2/eks-mng-custom-template.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+echo "foo"
+export FOO=bar
+
+# Custom user data template provided for rendering
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+    --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16
+echo "All done"
diff --git a/tests/user-data/rendered/al2/eks-mng-no-op.sh b/tests/user-data/rendered/al2/eks-mng-no-op.sh
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/al2/self-mng-bootstrap-ipv6.sh b/tests/user-data/rendered/al2/self-mng-bootstrap-ipv6.sh
new file mode 100755
index 0000000000..b6fd557a13
--- /dev/null
+++ b/tests/user-data/rendered/al2/self-mng-bootstrap-ipv6.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+set -e
+echo "foo"
+export FOO=bar
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+  --ip-family ipv6 --service-ipv6-cidr fdd3:7636:68bc::/108
+echo "All done"
diff --git a/tests/user-data/rendered/al2/self-mng-bootstrap.sh b/tests/user-data/rendered/al2/self-mng-bootstrap.sh
new file mode 100755
index 0000000000..7fcd81973e
--- /dev/null
+++ b/tests/user-data/rendered/al2/self-mng-bootstrap.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+set -e
+echo "foo"
+export FOO=bar
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+  --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16
+echo "All done"
diff --git a/tests/user-data/rendered/al2/self-mng-custom-template.sh b/tests/user-data/rendered/al2/self-mng-custom-template.sh
new file mode 100755
index 0000000000..e18460fa1d
--- /dev/null
+++ b/tests/user-data/rendered/al2/self-mng-custom-template.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+echo "foo"
+export FOO=bar
+
+# Custom user data template provided for rendering
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+    --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16
+echo "All done"
diff --git a/tests/user-data/rendered/al2/self-mng-no-op.sh b/tests/user-data/rendered/al2/self-mng-no-op.sh
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/al2023/eks-mng-additional.txt b/tests/user-data/rendered/al2023/eks-mng-additional.txt
new file mode 100755
index 0000000000..fe3c75c898
--- /dev/null
+++ b/tests/user-data/rendered/al2023/eks-mng-additional.txt
@@ -0,0 +1,19 @@
+Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
+MIME-Version: 1.0
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      shutdownGracePeriod: 30s
+      featureGates:
+        DisableKubeletCloudCredentialProviders: true
+
+--MIMEBOUNDARY--
diff --git a/tests/user-data/rendered/al2023/eks-mng-custom-ami.txt b/tests/user-data/rendered/al2023/eks-mng-custom-ami.txt
new file mode 100755
index 0000000000..46362c2030
--- /dev/null
+++ b/tests/user-data/rendered/al2023/eks-mng-custom-ami.txt
@@ -0,0 +1,41 @@
+Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
+MIME-Version: 1.0
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      shutdownGracePeriod: 30s
+      featureGates:
+        DisableKubeletCloudCredentialProviders: true
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ex-user-data
+    apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+    certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+    cidr: 192.168.0.0/16
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript; charset="us-ascii"
+Mime-Version: 1.0
+
+echo "All done"
+
+--MIMEBOUNDARY--
diff --git a/tests/user-data/rendered/al2023/eks-mng-custom-template.txt b/tests/user-data/rendered/al2023/eks-mng-custom-template.txt
new file mode 100755
index 0000000000..a97e188c83
--- /dev/null
+++ b/tests/user-data/rendered/al2023/eks-mng-custom-template.txt
@@ -0,0 +1,45 @@
+Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
+MIME-Version: 1.0
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      shutdownGracePeriod: 30s
+      featureGates:
+        DisableKubeletCloudCredentialProviders: true
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ex-user-data
+    apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+    certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+    cidr: 192.168.0.0/16
+  containerd:
+    config: |
+      [plugins."io.containerd.grpc.v1.cri".containerd]
+      discard_unpacked_layers = false
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript; charset="us-ascii"
+Mime-Version: 1.0
+
+echo "All done"
+
+--MIMEBOUNDARY--
diff --git a/tests/user-data/rendered/al2023/eks-mng-no-op.txt b/tests/user-data/rendered/al2023/eks-mng-no-op.txt
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/al2023/self-mng-bootstrap.txt b/tests/user-data/rendered/al2023/self-mng-bootstrap.txt
new file mode 100755
index 0000000000..46362c2030
--- /dev/null
+++ b/tests/user-data/rendered/al2023/self-mng-bootstrap.txt
@@ -0,0 +1,41 @@
+Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
+MIME-Version: 1.0
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      shutdownGracePeriod: 30s
+      featureGates:
+        DisableKubeletCloudCredentialProviders: true
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ex-user-data
+    apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+    certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+    cidr: 192.168.0.0/16
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript; charset="us-ascii"
+Mime-Version: 1.0
+
+echo "All done"
+
+--MIMEBOUNDARY--
diff --git a/tests/user-data/rendered/al2023/self-mng-custom-template.txt b/tests/user-data/rendered/al2023/self-mng-custom-template.txt
new file mode 100755
index 0000000000..a97e188c83
--- /dev/null
+++ b/tests/user-data/rendered/al2023/self-mng-custom-template.txt
@@ -0,0 +1,45 @@
+Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
+MIME-Version: 1.0
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      shutdownGracePeriod: 30s
+      featureGates:
+        DisableKubeletCloudCredentialProviders: true
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: application/node.eks.aws
+Mime-Version: 1.0
+
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ex-user-data
+    apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com
+    certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
+    cidr: 192.168.0.0/16
+  containerd:
+    config: |
+      [plugins."io.containerd.grpc.v1.cri".containerd]
+      discard_unpacked_layers = false
+
+--MIMEBOUNDARY
+Content-Transfer-Encoding: 7bit
+Content-Type: text/x-shellscript; charset="us-ascii"
+Mime-Version: 1.0
+
+echo "All done"
+
+--MIMEBOUNDARY--
diff --git a/tests/user-data/rendered/al2023/self-mng-no-op.txt b/tests/user-data/rendered/al2023/self-mng-no-op.txt
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/bottlerocket/eks-mng-additional.toml b/tests/user-data/rendered/bottlerocket/eks-mng-additional.toml
new file mode 100755
index 0000000000..7ed4affaf6
--- /dev/null
+++ b/tests/user-data/rendered/bottlerocket/eks-mng-additional.toml
@@ -0,0 +1,3 @@
+# extra args added
+[settings.kernel]
+lockdown = "integrity"
diff --git a/tests/user-data/rendered/bottlerocket/eks-mng-custom-ami.toml b/tests/user-data/rendered/bottlerocket/eks-mng-custom-ami.toml
new file mode 100755
index 0000000000..38b0c46a0b
--- /dev/null
+++ b/tests/user-data/rendered/bottlerocket/eks-mng-custom-ami.toml
@@ -0,0 +1,8 @@
+[settings.kubernetes]
+"cluster-name" = "ex-user-data"
+"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+"cluster-certificate" = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+"cluster-dns-ip" = ["192.168.0.10", "169.254.20.10"]
+# extra args added
+[settings.kernel]
+lockdown = "integrity"
diff --git a/tests/user-data/rendered/bottlerocket/eks-mng-custom-template.toml b/tests/user-data/rendered/bottlerocket/eks-mng-custom-template.toml
new file mode 100755
index 0000000000..c5c6774cfc
--- /dev/null
+++ b/tests/user-data/rendered/bottlerocket/eks-mng-custom-template.toml
@@ -0,0 +1,9 @@
+# Custom user data template provided for rendering
+[settings.kubernetes]
+"cluster-name" = "ex-user-data"
+"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+"cluster-certificate" = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+
+# extra args added
+[settings.kernel]
+lockdown = "integrity"
diff --git a/tests/user-data/rendered/bottlerocket/eks-mng-no-op.toml b/tests/user-data/rendered/bottlerocket/eks-mng-no-op.toml
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/bottlerocket/self-mng-bootstrap.toml b/tests/user-data/rendered/bottlerocket/self-mng-bootstrap.toml
new file mode 100755
index 0000000000..76f8b82dcd
--- /dev/null
+++ b/tests/user-data/rendered/bottlerocket/self-mng-bootstrap.toml
@@ -0,0 +1,8 @@
+[settings.kubernetes]
+"cluster-name" = "ex-user-data"
+"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+"cluster-certificate" = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+"cluster-dns-ip" = ["192.168.0.10"]
+# extra args added
+[settings.kernel]
+lockdown = "integrity"
diff --git a/tests/user-data/rendered/bottlerocket/self-mng-custom-template.toml b/tests/user-data/rendered/bottlerocket/self-mng-custom-template.toml
new file mode 100755
index 0000000000..c5c6774cfc
--- /dev/null
+++ b/tests/user-data/rendered/bottlerocket/self-mng-custom-template.toml
@@ -0,0 +1,9 @@
+# Custom user data template provided for rendering
+[settings.kubernetes]
+"cluster-name" = "ex-user-data"
+"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
+"cluster-certificate" = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
+
+# extra args added
+[settings.kernel]
+lockdown = "integrity"
diff --git a/tests/user-data/rendered/bottlerocket/self-mng-no-op.toml b/tests/user-data/rendered/bottlerocket/self-mng-no-op.toml
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/windows/eks-mng-additional.ps1 b/tests/user-data/rendered/windows/eks-mng-additional.ps1
new file mode 100755
index 0000000000..0debfcf9ad
--- /dev/null
+++ b/tests/user-data/rendered/windows/eks-mng-additional.ps1
@@ -0,0 +1 @@
+[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
diff --git a/tests/user-data/rendered/windows/eks-mng-custom-ami.ps1 b/tests/user-data/rendered/windows/eks-mng-custom-ami.ps1
new file mode 100755
index 0000000000..182195b707
--- /dev/null
+++ b/tests/user-data/rendered/windows/eks-mng-custom-ami.ps1
@@ -0,0 +1,9 @@
+<powershell>
+[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+</powershell>
diff --git a/tests/user-data/rendered/windows/eks-mng-custom-template.ps1 b/tests/user-data/rendered/windows/eks-mng-custom-template.ps1
new file mode 100755
index 0000000000..aa4008c7e5
--- /dev/null
+++ b/tests/user-data/rendered/windows/eks-mng-custom-template.ps1
@@ -0,0 +1,10 @@
+# Custom user data template provided for rendering
+<powershell>
+[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+</powershell>
diff --git a/tests/user-data/rendered/windows/eks-mng-no-op.ps1 b/tests/user-data/rendered/windows/eks-mng-no-op.ps1
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/rendered/windows/self-mng-bootstrap.ps1 b/tests/user-data/rendered/windows/self-mng-bootstrap.ps1
new file mode 100755
index 0000000000..182195b707
--- /dev/null
+++ b/tests/user-data/rendered/windows/self-mng-bootstrap.ps1
@@ -0,0 +1,9 @@
+<powershell>
+[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+</powershell>
diff --git a/tests/user-data/rendered/windows/self-mng-custom-template.ps1 b/tests/user-data/rendered/windows/self-mng-custom-template.ps1
new file mode 100755
index 0000000000..aa4008c7e5
--- /dev/null
+++ b/tests/user-data/rendered/windows/self-mng-custom-template.ps1
@@ -0,0 +1,10 @@
+# Custom user data template provided for rendering
+<powershell>
+[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
+</powershell>
diff --git a/tests/user-data/rendered/windows/self-mng-no-op.ps1 b/tests/user-data/rendered/windows/self-mng-no-op.ps1
new file mode 100755
index 0000000000..e69de29bb2
diff --git a/tests/user-data/templates/al2023_custom.tpl b/tests/user-data/templates/al2023_custom.tpl
new file mode 100644
index 0000000000..34c566c154
--- /dev/null
+++ b/tests/user-data/templates/al2023_custom.tpl
@@ -0,0 +1,15 @@
+%{ if enable_bootstrap_user_data ~}
+---
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: ${cluster_name}
+    apiServerEndpoint: ${cluster_endpoint}
+    certificateAuthority: ${cluster_auth_base64}
+    cidr: ${cluster_service_cidr}
+  containerd:
+    config: |
+      [plugins."io.containerd.grpc.v1.cri".containerd]
+      discard_unpacked_layers = false
+%{ endif ~}
diff --git a/tests/user-data/templates/bottlerocket_custom.tpl b/tests/user-data/templates/bottlerocket_custom.tpl
new file mode 100644
index 0000000000..6c4d9434a7
--- /dev/null
+++ b/tests/user-data/templates/bottlerocket_custom.tpl
@@ -0,0 +1,7 @@
+# Custom user data template provided for rendering
+[settings.kubernetes]
+"cluster-name" = "${cluster_name}"
+"api-server" = "${cluster_endpoint}"
+"cluster-certificate" = "${cluster_auth_base64}"
+
+${bootstrap_extra_args~}
diff --git a/tests/user-data/templates/linux_custom.tpl b/tests/user-data/templates/linux_custom.tpl
new file mode 100644
index 0000000000..b3cb73a2ab
--- /dev/null
+++ b/tests/user-data/templates/linux_custom.tpl
@@ -0,0 +1,11 @@
+#!/bin/bash
+set -ex
+
+${pre_bootstrap_user_data ~}
+
+# Custom user data template provided for rendering
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \
+    --ip-family ${cluster_ip_family} --service-${cluster_ip_family}-cidr ${cluster_service_cidr}
+${post_bootstrap_user_data ~}
diff --git a/tests/user-data/templates/windows_custom.tpl b/tests/user-data/templates/windows_custom.tpl
new file mode 100644
index 0000000000..3c1ca7014a
--- /dev/null
+++ b/tests/user-data/templates/windows_custom.tpl
@@ -0,0 +1,10 @@
+# Custom user data template provided for rendering
+<powershell>
+${pre_bootstrap_user_data ~}
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+${post_bootstrap_user_data ~}
+</powershell>
diff --git a/tests/user-data/variables.tf b/tests/user-data/variables.tf
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/user-data/versions.tf b/tests/user-data/versions.tf
new file mode 100644
index 0000000000..0d69a257f0
--- /dev/null
+++ b/tests/user-data/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+  required_version = ">= 1.5.7"
+
+  required_providers {
+    local = {
+      source  = "hashicorp/local"
+      version = ">= 2.4"
+    }
+  }
+}
diff --git a/tools/semtag b/tools/semtag
deleted file mode 100755
index 568d4241ad..0000000000
--- a/tools/semtag
+++ /dev/null
@@ -1,627 +0,0 @@
-#!/usr/bin/env bash
-#
-# Thanks to @pnikosis for this script https://github.com/pnikosis/semtag
-#
-PROG=semtag
-PROG_VERSION="v0.1.0"
-
-SEMVER_REGEX="^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(\-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$"
-IDENTIFIER_REGEX="^\-([0-9A-Za-z-]+)\.([0-9A-Za-z-]+)*$"
-
-# Global variables
-FIRST_VERSION="v0.0.0"
-finalversion=$FIRST_VERSION
-lastversion=$FIRST_VERSION
-hasversiontag="false"
-scope="patch"
-displayonly="false"
-forcetag="false"
-forcedversion=
-versionname=
-identifier=
-
-HELP="\
-Usage:
-  $PROG
-  $PROG getlast
-  $PROG getfinal
-  $PROG (final|alpha|beta|candidate) [-s <scope> (major|minor|patch|auto) | -o]
-  $PROG --help
-  $PROG --version
-Options:
-  -s         The scope that must be increased, can be major, minor or patch.
-               The resulting version will match X.Y.Z(-PRERELEASE)(+BUILD)
-               where X, Y and Z are positive integers, PRERELEASE is an optional
-               string composed of alphanumeric characters describing if the build is
-               a release candidate, alpha or beta version, with a number.
-               BUILD is also an optional string composed of alphanumeric
-               characters and hyphens.
-               Setting the scope as 'auto', the script will choose the scope between
-               'minor' and 'patch', depending on the amount of lines added (<10% will
-               choose patch).
-  -v         Manually specifies the version to be tagged; must be a valid semantic version
-               in the format X.Y.Z where X, Y and Z are positive integers.
-  -o         Output the version only, shows the bumped version, but doesn't tag.
-  -f         Forces the tag, even if there are unstaged or uncommitted changes.
-Commands:
-  --help     Print this help message.
-  --version  Prints the program's version.
-  get        Returns both current final version and last tagged version.
-  getlast    Returns the latest tagged version.
-  getfinal   Returns the latest tagged final version.
-  getcurrent Returns the current version, based on the latest one; if there are uncommitted or
-               unstaged changes, they will be reflected in the version, adding the number of
-               pending commits, current branch and commit hash.
-  final      Tags the current build as a final version; this can only be done on the master branch.
-  candidate  Tags the current build as a release candidate, the tag will contain all
-               the commits from the last final version.
-  alpha      Tags the current build as an alpha version, the tag will contain all
-               the commits from the last final version.
-  beta       Tags the current build as a beta version, the tag will contain all
-               the commits from the last final version."
-
-# Commands and options
-ACTION="getlast"
-ACTION="$1"
-shift
-
-# We get the parameters
-while getopts "v:s:of" opt; do
-  case $opt in
-    v)
-      forcedversion="$OPTARG"
-      ;;
-    s)
-      scope="$OPTARG"
-      ;;
-    o)
-      displayonly="true"
-      ;;
-    f)
-      forcetag="true"
-      ;;
-    \?)
-      echo "Invalid option: -$OPTARG" >&2
-      exit 1
-      ;;
-    :)
-      echo "Option -$OPTARG requires an argument." >&2
-      exit 1
-      ;;
-  esac
-done
-
-# Gets a string with the version and returns an array of up to 5 elements with all the parts of the semantic version
-# $1 The string containing the version in semantic format
-# $2 The variable to store the result array:
-#      position 0: major number
-#      position 1: minor number
-#      position 2: patch number
-#      position 3: identifier (or prerelease identifier)
-#      position 4: build info
-function explode_version {
-  local __version=$1
-  local __result=$2
-  if [[ $__version =~ $SEMVER_REGEX ]] ; then
-    local __major=${BASH_REMATCH[1]}
-    local __minor=${BASH_REMATCH[2]}
-    local __patch=${BASH_REMATCH[3]}
-    local __prere=${BASH_REMATCH[4]}
-    local __build=${BASH_REMATCH[5]}
-    eval "$__result=(\"$__major\" \"$__minor\" \"$__patch\" \"$__prere\" \"$__build\")"
-  else
-    eval "$__result="
-  fi
-}
-
-# Compare two versions and returns -1, 0 or 1
-# $1 The first version to compare
-# $2 The second version to compare
-# $3 The variable where to store the result
-function compare_versions {
-  local __first
-  local __second
-  explode_version $1 __first
-  explode_version $2 __second
-  local lv=$3
-
-  # Compares MAJOR, MINOR and PATCH
-  for i in 0 1 2; do
-    local __numberfirst=${__first[$i]}
-    local __numbersecond=${__second[$i]}
-    case $(($__numberfirst - $__numbersecond)) in
-      0)
-        ;;
-      -[0-9]*)
-        eval "$lv=-1"
-        return 0
-        ;;
-      [0-9]*)
-        eval "$lv=1"
-        return 0
-        ;;
-    esac
-  done
-
-  # Identifiers should compare with the ASCII order.
-  local __identifierfirst=${__first[3]}
-  local __identifiersecond=${__second[3]}
-  if [[ -n "$__identifierfirst" ]] && [[ -n "$__identifiersecond" ]]; then
-    if [[ "$__identifierfirst" > "$__identifiersecond" ]]; then
-      eval "$lv=1"
-      return 0
-    elif [[ "$__identifierfirst" < "$__identifiersecond" ]]; then
-      eval "$lv=-1"
-      return 0
-    fi
-  elif [[ -z "$__identifierfirst" ]] && [[ -n "$__identifiersecond" ]]; then
-    eval "$lv=1"
-    return 0
-  elif [[ -n "$__identifierfirst" ]] && [[ -z "$__identifiersecond" ]]; then
-    eval "$lv=-1"
-    return 0
-  fi
-
-  eval "$lv=0"
-}
-
-# Returns the last version of two
-# $1 The first version to compare
-# $2 The second version to compare
-# $3 The variable where to store the last one
-function get_latest_of_two {
-  local __first=$1
-  local __second=$2
-  local __result
-  local __latest=$3
-  compare_versions $__first $__second __result
-  case $__result in
-    0)
-      eval "$__latest=$__second"
-      ;;
-    -1)
-      eval "$__latest=$__second"
-      ;;
-    1)
-      eval "$__latest=$__first"
-      ;;
-  esac
-}
-
-# Assigns a 2-element array from the identifier, with the identifier at pos 0 and the number at pos 1
-# $1 The identifier in the format -id.#
-# $2 The variable where to store the 2-element array
-function explode_identifier {
-  local __identifier=$1
-  local __result=$2
-  if [[ $__identifier =~ $IDENTIFIER_REGEX ]] ; then
-    local __id=${BASH_REMATCH[1]}
-    local __number=${BASH_REMATCH[2]}
-    if [[ -z "$__number" ]]; then
-      __number=1
-    fi
-    eval "$__result=(\"$__id\" \"$__number\")"
-  else
-    eval "$__result="
-  fi
-}
-
-# Gets a list of tags and assigns the base and latest versions
-# Receives an array with the tags containing the versions
-# Assigns to the global variables finalversion and lastversion the final version and the latest version
-function get_latest {
-  local __taglist=("$@")
-  local __tagsnumber=${#__taglist[@]}
-  local __current
-  case $__tagsnumber in
-    0)
-      finalversion=$FIRST_VERSION
-      lastversion=$FIRST_VERSION
-      ;;
-    1)
-      __current=${__taglist[0]}
-      explode_version $__current ver
-      if [ -n "$ver" ]; then
-        if [ -n "${ver[3]}" ]; then
-          finalversion=$FIRST_VERSION
-        else
-          finalversion=$__current
-        fi
-        lastversion=$__current
-      else
-        finalversion=$FIRST_VERSION
-        lastversion=$FIRST_VERSION
-      fi
-      ;;
-    *)
-      local __lastpos=$(($__tagsnumber-1))
-      for i in $(seq 0 $__lastpos)
-      do
-        __current=${__taglist[i]}
-        explode_version ${__taglist[i]} ver
-        if [ -n "$ver" ]; then
-          if [ -z "${ver[3]}" ]; then
-            get_latest_of_two $finalversion $__current finalversion
-            get_latest_of_two $lastversion $finalversion lastversion
-          else
-            get_latest_of_two $lastversion $__current lastversion
-          fi
-        fi
-      done
-      ;;
-  esac
-
-  if git rev-parse -q --verify "refs/tags/$lastversion" >/dev/null; then
-    hasversiontag="true"
-  else
-    hasversiontag="false"
-  fi
-}
-
-# Gets the next version given the provided scope
-# $1 The version that is going to be bumped
-# $2 The scope to bump
-# $3 The variable where to store the result
-function get_next_version {
-  local __exploded
-  local __fromversion=$1
-  local __scope=$2
-  local __result=$3
-  explode_version $__fromversion __exploded
-  case $__scope in
-    major)
-      __exploded[0]=$((${__exploded[0]}+1))
-      __exploded[1]=0
-      __exploded[2]=0
-    ;;
-    minor)
-      __exploded[1]=$((${__exploded[1]}+1))
-      __exploded[2]=0
-    ;;
-    patch)
-      __exploded[2]=$((${__exploded[2]}+1))
-    ;;
-  esac
-
-  eval "$__result=v${__exploded[0]}.${__exploded[1]}.${__exploded[2]}"
-}
-
-function bump_version {
-  ## First we try to get the next version based on the existing last one
-  if [ "$scope" == "auto" ]; then
-    get_scope_auto scope
-  fi
-
-  local __candidatefromlast=$FIRST_VERSION
-  local __explodedlast
-  explode_version $lastversion __explodedlast
-  if [[ -n "${__explodedlast[3]}" ]]; then
-    # Last version is not final
-    local __idlast
-    explode_identifier ${__explodedlast[3]} __idlast
-
-    # We get the last, given the desired id based on the scope
-    __candidatefromlast="v${__explodedlast[0]}.${__explodedlast[1]}.${__explodedlast[2]}"
-    if [[ -n "$identifier" ]]; then
-      local __nextid="$identifier.1"
-      if [ "$identifier" == "${__idlast[0]}" ]; then
-        # We target the same identifier as the last so we increase one
-        __nextid="$identifier.$(( ${__idlast[1]}+1 ))"
-        __candidatefromlast="$__candidatefromlast-$__nextid"
-      else
-        # Different identifiers, we make sure we are assigning a higher identifier, if not, we increase the version
-        __candidatefromlast="$__candidatefromlast-$__nextid"
-        local __comparedwithlast
-        compare_versions $__candidatefromlast $lastversion __comparedwithlast
-        if [ "$__comparedwithlast" == -1 ]; then
-          get_next_version $__candidatefromlast $scope __candidatefromlast
-          __candidatefromlast="$__candidatefromlast-$__nextid"
-        fi
-      fi
-    fi
-  fi
-
-  # Then we try to get the version based on the latest final one
-  local __candidatefromfinal=$FIRST_VERSION
-  get_next_version $finalversion $scope __candidatefromfinal
-  if [[ -n "$identifier" ]]; then
-    __candidatefromfinal="$__candidatefromfinal-$identifier.1"
-  fi
-
-  # Finally we compare both candidates
-  local __resultversion
-  local __result
-  compare_versions $__candidatefromlast $__candidatefromfinal __result
-  case $__result in
-    0)
-      __resultversion=$__candidatefromlast
-      ;;
-    -1)
-      __resultversion="$__candidatefromfinal"
-      ;;
-    1)
-      __resultversion=$__candidatefromlast
-      ;;
-  esac
-
-  eval "$1=$__resultversion"
-}
-
-function increase_version {
-  local __version=
-
-  if [ -z $forcedversion ]; then
-    bump_version __version
-  else
-    if [[ $forcedversion =~ $SEMVER_REGEX ]] ; then
-      compare_versions $forcedversion $lastversion __result
-      if [ $__result -le 0 ]; then
-        echo "Version can't be lower than last version: $lastversion"
-        exit 1
-      fi
-    else
-      echo "Non valid version to bump"
-      exit 1
-    fi
-    __version=$forcedversion
-  fi
-
-  if [ "$displayonly" == "true" ]; then
-    echo "$__version"
-  else
-    if [ "$forcetag" == "false" ]; then
-      check_git_dirty_status
-    fi
-    local __commitlist
-    if [ "$finalversion" == "$FIRST_VERSION" ] || [ "$hasversiontag" != "true" ]; then
-      __commitlist="$(git log --pretty=oneline | cat)"
-    else
-      __commitlist="$(git log --pretty=oneline $finalversion... | cat)"
-    fi
-
-    # If we are forcing a bump, we add bump to the commit list
-    if [[ -z $__commitlist && "$forcetag" == "true" ]]; then
-      __commitlist="bump"
-    fi
-
-    if [[ -z $__commitlist ]]; then
-      echo "No commits since the last final version, not bumping version"
-    else
-      if [[ -z $versionname ]]; then
-        versionname=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
-      fi
-      local __message="$versionname
-$__commitlist"
-
-      # We check we have info on the user
-      local __username=$(git config user.name)
-      if [ -z "$__username" ]; then
-        __username=$(id -u -n)
-        git config user.name $__username
-      fi
-      local __useremail=$(git config user.email)
-      if [ -z "$__useremail" ]; then
-        __useremail=$(hostname)
-        git config user.email "$__username@$__useremail"
-      fi
-
-      git tag -a $__version -m "$__message"
-
-      # If we have a remote, we push there
-      local __remotes=$(git remote)
-      if [[ -n $__remotes ]]; then
-        for __remote in $__remotes; do
-          git push $__remote $__version > /dev/null
-          if [ $? -eq 0 ]; then
-            echo "$__version pushed to $__remote"
-          else
-            echo "Error pushing the tag $__version to $__remote"
-            exit 1
-          fi
-        done
-      else
-        echo "$__version"
-      fi
-    fi
-  fi
-}
-
-function check_git_dirty_status {
-  local __repostatus=
-  get_work_tree_status __repostatus
-
-  if [ "$__repostatus" == "uncommitted" ]; then
-    echo "ERROR: You have uncommitted changes"
-    git status --porcelain
-    exit 1
-  fi
-
-  if [ "$__repostatus" == "unstaged" ]; then
-    echo "ERROR: You have unstaged changes"
-    git status --porcelain
-    exit 1
-  fi
-}
-
-# Get the total amount of lines of code in the repo
-function get_total_lines {
-  local __empty_id="$(git hash-object -t tree /dev/null)"
-  local __changes="$(git diff --numstat $__empty_id | cat)"
-  local __added_deleted=$1
-  get_changed_lines "$__changes" $__added_deleted
-}
-
-# Get the total amount of lines of code since the provided tag
-function get_sincetag_lines {
-  local __sincetag=$1
-  local __changes="$(git diff --numstat $__sincetag | cat)"
-  local __added_deleted=$2
-  get_changed_lines "$__changes" $__added_deleted
-}
-
-function get_changed_lines {
-  local __changes_numstat=$1
-  local __result=$2
-  IFS=$'\n' read -rd '' -a __changes_array <<<"$__changes_numstat"
-  local __diff_regex="^([0-9]+)[[:space:]]+([0-9]+)[[:space:]]+.+$"
-
-  local __total_added=0
-  local __total_deleted=0
-  for i in "${__changes_array[@]}"
-  do
-    if [[ $i =~ $__diff_regex ]] ; then
-      local __added=${BASH_REMATCH[1]}
-      local __deleted=${BASH_REMATCH[2]}
-      __total_added=$(( $__total_added+$__added ))
-      __total_deleted=$(( $__total_deleted+$__deleted ))
-    fi
-  done
-  eval "$2=( $__total_added $__total_deleted )"
-}
-
-function get_scope_auto {
-  local __verbose=$2
-  local __total=0
-  local __since=0
-  local __scope=
-
-  get_total_lines __total
-  get_sincetag_lines $finalversion __since
-
-  local __percentage=0
-  if [ "$__total" != "0" ]; then
-    local __percentage=$(( 100*$__since/$__total ))
-    if [ $__percentage -gt "10" ]; then
-      __scope="minor"
-    else
-      __scope="patch"
-    fi
-  fi
-
-  eval "$1=$__scope"
-  if [[ -n "$__verbose" ]]; then
-    echo "[Auto Scope] Percentage of lines changed: $__percentage"
-    echo "[Auto Scope] : $__scope"
-  fi
-}
-
-function get_work_tree_status {
-  # Update the index
-  git update-index -q --ignore-submodules --refresh > /dev/null
-  eval "$1="
-
-  if ! git diff-files --quiet --ignore-submodules -- > /dev/null
-  then
-    eval "$1=unstaged"
-  fi
-
-  if ! git diff-index --cached --quiet HEAD --ignore-submodules -- > /dev/null
-  then
-    eval "$1=uncommitted"
-  fi
-}
-
-function get_current {
-  if [ "$hasversiontag" == "true" ]; then
-    local __commitcount="$(git rev-list $lastversion.. --count)"
-  else
-    local __commitcount="$(git rev-list --count HEAD)"
-  fi
-  local __status=
-  get_work_tree_status __status
-
-  if [ "$__commitcount" == "0" ] && [ -z "$__status" ]; then
-    eval "$1=$lastversion"
-  else
-    local __buildinfo="$(git rev-parse --short HEAD)"
-    local __currentbranch="$(git rev-parse --abbrev-ref HEAD)"
-    if [ "$__currentbranch" != "master" ]; then
-      __buildinfo="$__currentbranch.$__buildinfo"
-    fi
-
-    local __suffix=
-    if [ "$__commitcount" != "0" ]; then
-      if [ -n "$__suffix" ]; then
-        __suffix="$__suffix."
-      fi
-      __suffix="$__suffix$__commitcount"
-    fi
-    if [ -n "$__status" ]; then
-      if [ -n "$__suffix" ]; then
-        __suffix="$__suffix."
-      fi
-      __suffix="$__suffix$__status"
-    fi
-
-    __suffix="$__suffix+$__buildinfo"
-    if [ "$lastversion" == "$finalversion" ]; then
-      scope="patch"
-      identifier=
-      local __bumped=
-      bump_version __bumped
-      eval "$1=$__bumped-dev.$__suffix"
-    else
-      eval "$1=$lastversion.$__suffix"
-    fi
-  fi
-}
-
-function init {
-  git fetch > /dev/null
-  TAGS="$(git tag)"
-  IFS=$'\n' read -rd '' -a TAG_ARRAY <<<"$TAGS"
-
-  get_latest ${TAG_ARRAY[@]}
-  currentbranch="$(git rev-parse --abbrev-ref HEAD)"
-}
-
-case $ACTION in
-  --help)
-    echo -e "$HELP"
-    ;;
-  --version)
-    echo -e "${PROG}: $PROG_VERSION"
-    ;;
-  final)
-    init
-    diff=$(git diff master | cat)
-    if [ "$forcetag" == "false" ]; then
-      if [ -n "$diff" ]; then
-        echo "ERROR: Branch must be updated with master for final versions"
-        exit 1
-      fi
-    fi
-    increase_version
-    ;;
-  alpha|beta)
-    init
-    identifier="$ACTION"
-    increase_version
-    ;;
-  candidate)
-    init
-    identifier="rc"
-    increase_version
-    ;;
-  getlast)
-    init
-    echo "$lastversion"
-    ;;
-  getfinal)
-    init
-    echo "$finalversion"
-    ;;
-  getcurrent)
-    init
-    get_current current
-    echo "$current"
-    ;;
-  get)
-    init
-    echo "Current final version: $finalversion"
-    echo "Last tagged version:   $lastversion"
-    ;;
-  *)
-    echo "'$ACTION' is not a valid command, see --help for available commands."
-    ;;
-esac
diff --git a/variables.tf b/variables.tf
index b7d560e402..5e23df6c98 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,395 +1,1471 @@
-variable "cluster_enabled_log_types" {
-  default     = []
-  description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
-  type        = list(string)
+variable "create" {
+  description = "Controls if resources should be created (affects nearly all resources)"
+  type        = bool
+  default     = true
 }
 
-variable "cluster_log_kms_key_id" {
-  default     = ""
-  description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)"
+variable "prefix_separator" {
+  description = "The separator to use between the prefix and the generated timestamp for resource names"
   type        = string
+  default     = "-"
 }
 
-variable "cluster_log_retention_in_days" {
-  default     = 90
-  description = "Number of days to retain log events. Default retention - 90 days."
-  type        = number
+variable "region" {
+  description = "Region where the resource(s) will be managed. Defaults to the Region set in the provider configuration"
+  type        = string
+  default     = null
 }
 
-variable "cluster_name" {
-  description = "Name of the EKS cluster. Also used as a prefix in names of related resources."
-  type        = string
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
 }
 
-variable "cluster_security_group_id" {
-  description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers"
+################################################################################
+# Cluster
+################################################################################
+
+variable "name" {
+  description = "Name of the EKS cluster"
   type        = string
   default     = ""
 }
 
-variable "cluster_version" {
-  description = "Kubernetes version to use for the EKS cluster."
+variable "kubernetes_version" {
+  description = "Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.33`)"
   type        = string
+  default     = null
 }
 
-variable "kubeconfig_output_path" {
-  description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`."
-  type        = string
-  default     = "./"
+variable "enabled_log_types" {
+  description = "A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
+  type        = list(string)
+  default     = ["audit", "api", "authenticator"]
+}
+
+variable "force_update_version" {
+  description = "Force version update by overriding upgrade-blocking readiness checks when updating a cluster"
+  type        = bool
+  default     = null
 }
 
-variable "kubeconfig_file_permission" {
-  description = "File permission of the Kubectl config file containing cluster configuration saved to `kubeconfig_output_path.`"
+variable "authentication_mode" {
+  description = "The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP`"
   type        = string
-  default     = "0600"
+  default     = "API_AND_CONFIG_MAP"
+}
+
+variable "compute_config" {
+  description = "Configuration block for the cluster compute configuration"
+  type = object({
+    enabled       = optional(bool, false)
+    node_pools    = optional(list(string))
+    node_role_arn = optional(string)
+  })
+  default = null
+}
+
+variable "upgrade_policy" {
+  description = "Configuration block for the cluster upgrade policy"
+  type = object({
+    support_type = optional(string)
+  })
+  default = null
+}
+
+variable "remote_network_config" {
+  description = "Configuration block for the cluster remote network configuration"
+  type = object({
+    remote_node_networks = object({
+      cidrs = optional(list(string))
+    })
+    remote_pod_networks = optional(object({
+      cidrs = optional(list(string))
+    }))
+  })
+  default = null
+}
+
+variable "zonal_shift_config" {
+  description = "Configuration block for the cluster zonal shift"
+  type = object({
+    enabled = optional(bool)
+  })
+  default = null
+}
+
+variable "additional_security_group_ids" {
+  description = "List of additional, externally created security group IDs to attach to the cluster control plane"
+  type        = list(string)
+  default     = []
 }
 
-variable "write_kubeconfig" {
-  description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `kubeconfig_output_path`."
-  type        = bool
-  default     = true
+variable "control_plane_subnet_ids" {
+  description = "A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane"
+  type        = list(string)
+  default     = []
+}
+
+variable "subnet_ids" {
+  description = "A list of subnet IDs where the nodes/node groups will be provisioned. If `control_plane_subnet_ids` is not provided, the EKS cluster control plane (ENIs) will be provisioned in these subnets"
+  type        = list(string)
+  default     = []
 }
 
-variable "manage_aws_auth" {
-  description = "Whether to apply the aws-auth configmap file."
+variable "endpoint_private_access" {
+  description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled"
+  type        = bool
   default     = true
 }
 
-variable "aws_auth_additional_labels" {
-  description = "Additional kubernetes labels applied on aws-auth ConfigMap"
-  default     = {}
-  type        = map(string)
+variable "endpoint_public_access" {
+  description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled"
+  type        = bool
+  default     = false
 }
 
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
+variable "endpoint_public_access_cidrs" {
+  description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint"
   type        = list(string)
-  default     = []
+  default     = ["0.0.0.0/0"]
 }
 
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-  default = []
+variable "ip_family" {
+  description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created"
+  type        = string
+  default     = "ipv4"
 }
 
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-  default = []
+variable "service_ipv4_cidr" {
+  description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
+  type        = string
+  default     = null
 }
 
-variable "subnets" {
-  description = "A list of subnets to place the EKS cluster and workers within."
-  type        = list(string)
+variable "service_ipv6_cidr" {
+  description = "The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster"
+  type        = string
+  default     = null
 }
 
-variable "tags" {
-  description = "A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only."
+variable "outpost_config" {
+  description = "Configuration for the AWS Outpost to provision the cluster on"
+  type = object({
+    control_plane_instance_type = optional(string)
+    control_plane_placement = optional(object({
+      group_name = string
+    }))
+    outpost_arns = list(string)
+  })
+  default = null
+}
+
+variable "encryption_config" {
+  description = "Configuration block with encryption configuration for the cluster"
+  type = object({
+    provider_key_arn = optional(string)
+    resources        = optional(list(string), ["secrets"])
+  })
+  default = {}
+}
+
+variable "attach_encryption_policy" {
+  description = "Indicates whether or not to attach an additional policy for the cluster IAM role to utilize the encryption key provided"
+  type        = bool
+  default     = true
+}
+
+variable "cluster_tags" {
+  description = "A map of additional tags to add to the cluster"
   type        = map(string)
   default     = {}
 }
 
-variable "vpc_id" {
-  description = "VPC where the cluster and workers will be deployed."
-  type        = string
+variable "create_primary_security_group_tags" {
+  description = "Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation"
+  type        = bool
+  default     = true
 }
 
-variable "worker_groups" {
-  description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys."
-  type        = any
-  default     = []
+variable "timeouts" {
+  description = "Create, update, and delete timeout configurations for the cluster"
+  type = object({
+    create = optional(string)
+    update = optional(string)
+    delete = optional(string)
+  })
+  default = null
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+variable "access_entries" {
+  description = "Map of access entries to add to the cluster"
+  type = map(object({
+    # Access entry
+    kubernetes_groups = optional(list(string))
+    principal_arn     = string
+    type              = optional(string, "STANDARD")
+    user_name         = optional(string)
+    tags              = optional(map(string), {})
+    # Access policy association
+    policy_associations = optional(map(object({
+      policy_arn = string
+      access_scope = object({
+        namespaces = optional(list(string))
+        type       = string
+      })
+    })))
+  }))
+  default = {}
 }
 
-variable "workers_group_defaults" {
-  description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys."
-  type        = any
-  default     = {}
+variable "enable_cluster_creator_admin_permissions" {
+  description = "Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry"
+  type        = bool
+  default     = false
 }
 
-variable "worker_groups_launch_template" {
-  description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
-  type        = any
-  default     = []
+################################################################################
+# KMS Key
+################################################################################
+
+variable "create_kms_key" {
+  description = "Controls if a KMS key for cluster encryption should be created"
+  type        = bool
+  default     = true
 }
 
-variable "worker_security_group_id" {
-  description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster."
+variable "kms_key_description" {
+  description = "The description of the key as viewed in AWS console"
   type        = string
-  default     = ""
+  default     = null
 }
 
-variable "worker_ami_name_filter" {
-  description = "Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
-  type        = string
-  default     = ""
+variable "kms_key_deletion_window_in_days" {
+  description = "The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key. If you specify a value, it must be between `7` and `30`, inclusive. If you do not specify a value, it defaults to `30`"
+  type        = number
+  default     = null
 }
 
-variable "worker_ami_name_filter_windows" {
-  description = "Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster_version' is used."
-  type        = string
-  default     = ""
+variable "enable_kms_key_rotation" {
+  description = "Specifies whether key rotation is enabled"
+  type        = bool
+  default     = true
 }
 
-variable "worker_ami_owner_id" {
-  description = "The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
-  type        = string
-  default     = "amazon"
+variable "kms_key_enable_default_policy" {
+  description = "Specifies whether to enable the default key policy"
+  type        = bool
+  default     = true
 }
 
-variable "worker_ami_owner_id_windows" {
-  description = "The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft')."
-  type        = string
-  default     = "amazon"
+variable "kms_key_owners" {
+  description = "A list of IAM ARNs for those who will have full key permissions (`kms:*`)"
+  type        = list(string)
+  default     = []
 }
 
-variable "worker_additional_security_group_ids" {
-  description = "A list of additional security group ids to attach to worker instances"
+variable "kms_key_administrators" {
+  description = "A list of IAM ARNs for [key administrators](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-administrators). If no value is provided, the current caller identity is used to ensure at least one key admin is available"
   type        = list(string)
   default     = []
 }
 
-variable "worker_sg_ingress_from_port" {
-  description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)."
-  type        = number
-  default     = 1025
+variable "kms_key_users" {
+  description = "A list of IAM ARNs for [key users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-users)"
+  type        = list(string)
+  default     = []
 }
 
-variable "workers_additional_policies" {
-  description = "Additional policies to be added to workers"
+variable "kms_key_service_users" {
+  description = "A list of IAM ARNs for [key service users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-service-integration)"
   type        = list(string)
   default     = []
 }
 
-variable "kubeconfig_aws_authenticator_command" {
-  description = "Command to use to fetch AWS EKS credentials."
-  type        = string
-  default     = "aws-iam-authenticator"
+variable "kms_key_source_policy_documents" {
+  description = "List of IAM policy documents that are merged together into the exported document. Statements must have unique `sid`s"
+  type        = list(string)
+  default     = []
 }
 
-variable "kubeconfig_aws_authenticator_command_args" {
-  description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]."
+variable "kms_key_override_policy_documents" {
+  description = "List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid`"
   type        = list(string)
   default     = []
 }
 
-variable "kubeconfig_aws_authenticator_additional_args" {
-  description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]."
+variable "kms_key_aliases" {
+  description = "A list of aliases to create. Note - due to the use of `toset()`, values must be static strings and not computed values"
   type        = list(string)
   default     = []
 }
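+
+# Illustrative sketch (not part of the module): KMS inputs a caller might set
+# when this module creates the cluster encryption key. The administrator ARN
+# and alias are assumptions for the example.
+#
+# create_kms_key                  = true
+# kms_key_deletion_window_in_days = 7
+# kms_key_administrators          = ["arn:aws:iam::111122223333:role/kms-admin"]
+# kms_key_aliases                 = ["eks/my-cluster"]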
 
-variable "kubeconfig_aws_authenticator_env_variables" {
-  description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}."
+################################################################################
+# CloudWatch Log Group
+################################################################################
+
+variable "create_cloudwatch_log_group" {
+  description = "Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled"
+  type        = bool
+  default     = true
+}
+
+variable "cloudwatch_log_group_retention_in_days" {
+  description = "Number of days to retain log events. Default retention - 90 days"
+  type        = number
+  default     = 90
+}
+
+variable "cloudwatch_log_group_kms_key_id" {
+  description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)"
+  type        = string
+  default     = null
+}
+
+variable "cloudwatch_log_group_class" {
+  description = "Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`"
+  type        = string
+  default     = null
+}
+
+variable "cloudwatch_log_group_tags" {
+  description = "A map of additional tags to add to the cloudwatch log group created"
   type        = map(string)
   default     = {}
 }
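+
+# Illustrative sketch (not part of the module): CloudWatch log group settings a
+# caller might pass when control plane logging is enabled. The retention value
+# and KMS key ARN are assumptions for the example.
+#
+# create_cloudwatch_log_group            = true
+# cloudwatch_log_group_retention_in_days = 30
+# cloudwatch_log_group_class             = "INFREQUENT_ACCESS"
+# cloudwatch_log_group_kms_key_id        = "arn:aws:kms:eu-west-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"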
 
-variable "kubeconfig_name" {
-  description = "Override the default name used for items kubeconfig."
+################################################################################
+# Cluster Security Group
+################################################################################
+
+variable "create_security_group" {
+  description = "Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default"
+  type        = bool
+  default     = true
+}
+
+variable "security_group_id" {
+  description = "Existing security group ID to be attached to the cluster"
   type        = string
   default     = ""
 }
 
-variable "cluster_create_timeout" {
-  description = "Timeout value when creating the EKS cluster."
+variable "vpc_id" {
+  description = "ID of the VPC where the cluster security group will be provisioned"
   type        = string
-  default     = "30m"
+  default     = null
 }
 
-variable "cluster_delete_timeout" {
-  description = "Timeout value when deleting the EKS cluster."
+variable "security_group_name" {
+  description = "Name to use on cluster security group created"
   type        = string
-  default     = "15m"
+  default     = null
 }
 
-variable "cluster_create_security_group" {
-  description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
+variable "security_group_use_name_prefix" {
+  description = "Determines whether cluster security group name (`cluster_security_group_name`) is used as a prefix"
   type        = bool
   default     = true
 }
 
-variable "worker_create_security_group" {
-  description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`."
-  type        = bool
-  default     = true
+variable "security_group_description" {
+  description = "Description of the cluster security group created"
+  type        = string
+  default     = "EKS cluster security group"
+}
+
+variable "security_group_additional_rules" {
+  description = "List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source"
+  type = map(object({
+    protocol                   = optional(string, "tcp")
+    from_port                  = number
+    to_port                    = number
+    type                       = optional(string, "ingress")
+    description                = optional(string)
+    cidr_blocks                = optional(list(string))
+    ipv6_cidr_blocks           = optional(list(string))
+    prefix_list_ids            = optional(list(string))
+    self                       = optional(bool)
+    source_node_security_group = optional(bool, false)
+    source_security_group_id   = optional(string)
+  }))
+  default = {}
 }
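+
+# Illustrative sketch (not part of the module): a single additional ingress
+# rule on the cluster security group, sourced from the node security group.
+# The port and description are assumptions for the example; `protocol` and
+# `type` fall back to their defaults (`tcp`, `ingress`).
+#
+# security_group_additional_rules = {
+#   ingress_nodes_443 = {
+#     description                = "Node groups to cluster API"
+#     from_port                  = 443
+#     to_port                    = 443
+#     source_node_security_group = true
+#   }
+# }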
 
-variable "worker_create_initial_lifecycle_hooks" {
-  description = "Whether to create initial lifecycle hooks provided in worker groups."
+variable "security_group_tags" {
+  description = "A map of additional tags to add to the cluster security group created"
+  type        = map(string)
+  default     = {}
+}
+
+################################################################################
+# EKS IPV6 CNI Policy
+################################################################################
+
+variable "create_cni_ipv6_iam_policy" {
+  description = "Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy)"
   type        = bool
   default     = false
 }
 
-variable "worker_create_cluster_primary_security_group_rules" {
-  description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group."
+################################################################################
+# Node Security Group
+################################################################################
+
+variable "create_node_security_group" {
+  description = "Determines whether to create a security group for the node groups or use the existing `node_security_group_id`"
   type        = bool
-  default     = false
+  default     = true
 }
 
-variable "permissions_boundary" {
-  description = "If provided, all IAM roles will be created with this permissions boundary attached."
+variable "node_security_group_id" {
+  description = "ID of an existing security group to attach to the node groups created"
   type        = string
-  default     = null
+  default     = ""
 }
 
-variable "iam_path" {
-  description = "If provided, all IAM roles will be created on this path."
+variable "node_security_group_name" {
+  description = "Name to use on node security group created"
   type        = string
-  default     = "/"
+  default     = null
 }
 
-variable "cluster_create_endpoint_private_access_sg_rule" {
-  description = "Whether to create security group rules for the access to the Amazon EKS private API server endpoint. When is `true`, `cluster_endpoint_private_access_cidrs` must be setted."
+variable "node_security_group_use_name_prefix" {
+  description = "Determines whether node security group name (`node_security_group_name`) is used as a prefix"
   type        = bool
-  default     = false
+  default     = true
 }
 
-variable "cluster_endpoint_private_access_cidrs" {
-  description = "List of CIDR blocks which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`."
-  type        = list(string)
-  default     = null
+variable "node_security_group_description" {
+  description = "Description of the node security group created"
+  type        = string
+  default     = "EKS node shared security group"
+}
+
+variable "node_security_group_additional_rules" {
+  description = "List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source"
+  type = map(object({
+    protocol                      = optional(string, "tcp")
+    from_port                     = number
+    to_port                       = number
+    type                          = optional(string, "ingress")
+    description                   = optional(string)
+    cidr_blocks                   = optional(list(string))
+    ipv6_cidr_blocks              = optional(list(string))
+    prefix_list_ids               = optional(list(string))
+    self                          = optional(bool)
+    source_cluster_security_group = optional(bool, false)
+    source_security_group_id      = optional(string)
+  }))
+  default = {}
 }
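+
+# Illustrative sketch (not part of the module): extra node security group
+# rules, e.g. admitting webhook traffic from the control plane and allowing
+# node-to-node traffic. The ports shown are assumptions for the example.
+#
+# node_security_group_additional_rules = {
+#   ingress_cluster_webhooks = {
+#     description                   = "Cluster API to node webhook ports"
+#     from_port                     = 8443
+#     to_port                       = 9443
+#     source_cluster_security_group = true
+#   }
+#   ingress_self_all = {
+#     description = "Node to node all traffic"
+#     protocol    = "-1"
+#     from_port   = 0
+#     to_port     = 0
+#     self        = true
+#   }
+# }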
 
-variable "cluster_endpoint_private_access_sg" {
-  description = "List of security group IDs which can access the Amazon EKS private API server endpoint. To use this `cluster_endpoint_private_access` and `cluster_create_endpoint_private_access_sg_rule` must be set to `true`."
-  type        = list(string)
-  default     = null
+variable "node_security_group_enable_recommended_rules" {
+  description = "Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic"
+  type        = bool
+  default     = true
 }
 
-variable "cluster_endpoint_private_access" {
-  description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled."
-  type        = bool
-  default     = false
+variable "node_security_group_tags" {
+  description = "A map of additional tags to add to the node security group created"
+  type        = map(string)
+  default     = {}
 }
 
-variable "cluster_endpoint_public_access" {
-  description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. When it's set to `false` ensure to have a proper private access with `cluster_endpoint_private_access = true`."
+################################################################################
+# IRSA
+################################################################################
+
+variable "enable_irsa" {
+  description = "Determines whether to create an OpenID Connect Provider for EKS to enable IRSA"
   type        = bool
   default     = true
 }
 
-variable "cluster_endpoint_public_access_cidrs" {
-  description = "List of CIDR blocks which can access the Amazon EKS public API server endpoint."
+variable "openid_connect_audiences" {
+  description = "List of OpenID Connect audience client IDs to add to the IRSA provider"
   type        = list(string)
-  default     = ["0.0.0.0/0"]
+  default     = []
 }
 
-variable "manage_cluster_iam_resources" {
-  description = "Whether to let the module manage cluster IAM resources. If set to false, cluster_iam_role_name must be specified."
+variable "include_oidc_root_ca_thumbprint" {
+  description = "Determines whether to include the root CA thumbprint in the OpenID Connect (OIDC) identity provider's server certificate(s)"
   type        = bool
   default     = true
 }
 
-variable "cluster_iam_role_name" {
-  description = "IAM role name for the cluster. If manage_cluster_iam_resources is set to false, set this to reuse an existing IAM role. If manage_cluster_iam_resources is set to true, set this to force the created role name."
-  type        = string
-  default     = ""
+variable "custom_oidc_thumbprints" {
+  description = "Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s)"
+  type        = list(string)
+  default     = []
 }
 
-variable "manage_worker_iam_resources" {
-  description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers."
+################################################################################
+# Cluster IAM Role
+################################################################################
+
+variable "create_iam_role" {
+  description = "Determines whether an IAM role is created for the cluster"
   type        = bool
   default     = true
 }
 
-variable "workers_role_name" {
-  description = "User defined workers role name."
+variable "iam_role_arn" {
+  description = "Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false`"
   type        = string
-  default     = ""
+  default     = null
 }
 
-variable "attach_worker_cni_policy" {
-  description = "Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster."
-  type        = bool
-  default     = true
+variable "iam_role_name" {
+  description = "Name to use on IAM role created"
+  type        = string
+  default     = null
 }
 
-variable "create_eks" {
-  description = "Controls if EKS resources should be created (it affects almost all resources)"
+variable "iam_role_use_name_prefix" {
+  description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix"
   type        = bool
   default     = true
 }
 
-variable "node_groups_defaults" {
-  description = "Map of values to be applied to all node groups. See `node_groups` module's documentation for more details"
-  type        = any
+variable "iam_role_path" {
+  description = "The IAM role path"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_description" {
+  description = "Description of the role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the IAM role"
+  type        = string
+  default     = null
+}
+
+variable "iam_role_additional_policies" {
+  description = "Additional policies to be added to the IAM role"
+  type        = map(string)
   default     = {}
 }
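+
+# Illustrative sketch (not part of the module): `iam_role_additional_policies`
+# is keyed by an arbitrary static identifier with the policy ARN as the value.
+# The policy shown is only a placeholder to demonstrate the shape.
+#
+# iam_role_additional_policies = {
+#   additional = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
+# }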
 
-variable "node_groups" {
-  description = "Map of map of node groups to create. See `node_groups` module's documentation for more details"
-  type        = any
+variable "iam_role_tags" {
+  description = "A map of additional tags to add to the IAM role created"
+  type        = map(string)
   default     = {}
 }
 
-variable "enable_irsa" {
-  description = "Whether to create OpenID Connect Provider for EKS to enable IRSA"
+variable "encryption_policy_use_name_prefix" {
+  description = "Determines whether cluster encryption policy name (`cluster_encryption_policy_name`) is used as a prefix"
   type        = bool
-  default     = false
+  default     = true
 }
 
-variable "eks_oidc_root_ca_thumbprint" {
+variable "encryption_policy_name" {
+  description = "Name to use on cluster encryption policy created"
   type        = string
-  description = "Thumbprint of Root CA for EKS OIDC, Valid until 2037"
-  default     = "9e99a48a9960b14926bb7f3b02e22da2b0ab7280"
+  default     = null
 }
 
-variable "cluster_encryption_config" {
-  description = "Configuration block with encryption configuration for the cluster. See examples/secrets_encryption/main.tf for example format"
-  type = list(object({
-    provider_key_arn = string
-    resources        = list(string)
-  }))
-  default = []
+variable "encryption_policy_description" {
+  description = "Description of the cluster encryption policy created"
+  type        = string
+  default     = "Cluster encryption policy to allow cluster role to utilize CMK provided"
 }
 
-variable "fargate_profiles" {
-  description = "Fargate profiles to create. See `fargate_profile` keys section in fargate submodule's README.md for more details"
-  type        = any
+variable "encryption_policy_path" {
+  description = "Cluster encryption policy path"
+  type        = string
+  default     = null
+}
+
+variable "encryption_policy_tags" {
+  description = "A map of additional tags to add to the cluster encryption policy created"
+  type        = map(string)
   default     = {}
 }
 
-variable "create_fargate_pod_execution_role" {
-  description = "Controls if the EKS Fargate pod execution IAM role should be created."
+variable "dataplane_wait_duration" {
+  description = "Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed node group(s), self-managed node group(s), Fargate profile(s))"
+  type        = string
+  default     = "30s"
+}
+
+variable "enable_auto_mode_custom_tags" {
+  description = "Determines whether to enable permissions for custom tags resources created by EKS Auto Mode"
+  type        = bool
+  default     = true
+}
+
+################################################################################
+# EKS Addons
+################################################################################
+
+variable "addons" {
+  description = "Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name`"
+  type = map(object({
+    name                 = optional(string) # will fall back to map key
+    before_compute       = optional(bool, false)
+    most_recent          = optional(bool, true)
+    addon_version        = optional(string)
+    configuration_values = optional(string)
+    pod_identity_association = optional(list(object({
+      role_arn        = string
+      service_account = string
+    })))
+    preserve                    = optional(bool, true)
+    resolve_conflicts_on_create = optional(string, "NONE")
+    resolve_conflicts_on_update = optional(string, "OVERWRITE")
+    service_account_role_arn    = optional(string)
+    timeouts = optional(object({
+      create = optional(string)
+      update = optional(string)
+      delete = optional(string)
+    }))
+    tags = optional(map(string), {})
+  }))
+  default = null
+}
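+
+# Illustrative sketch (not part of the module): a minimal `addons` map. Using
+# `before_compute` for the VPC CNI and the prefix delegation setting are
+# assumptions for the example.
+#
+# addons = {
+#   coredns      = {}
+#   "kube-proxy" = {}
+#   "vpc-cni" = {
+#     before_compute = true
+#     configuration_values = jsonencode({
+#       env = {
+#         ENABLE_PREFIX_DELEGATION = "true"
+#       }
+#     })
+#   }
+# }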
+
+variable "addons_timeouts" {
+  description = "Create, update, and delete timeout configurations for the cluster addons"
+  type = object({
+    create = optional(string)
+    update = optional(string)
+    delete = optional(string)
+  })
+  default = null
+}
+
+################################################################################
+# EKS Identity Provider
+################################################################################
+
+variable "identity_providers" {
+  description = "Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA"
+  type = map(object({
+    client_id                     = string
+    groups_claim                  = optional(string)
+    groups_prefix                 = optional(string)
+    identity_provider_config_name = optional(string) # will fall back to map key
+    issuer_url                    = string
+    required_claims               = optional(map(string))
+    username_claim                = optional(string)
+    username_prefix               = optional(string)
+    tags                          = optional(map(string), {})
+  }))
+  default = null
+}
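+
+# Illustrative sketch (not part of the module): one OIDC identity provider
+# configuration. The issuer URL and client ID are assumptions for the example.
+#
+# identity_providers = {
+#   example_oidc = {
+#     client_id  = "0123456789abcdef"
+#     issuer_url = "https://oidc.example.com"
+#   }
+# }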
+
+################################################################################
+# EKS Auto Node IAM Role
+################################################################################
+
+variable "create_node_iam_role" {
+  description = "Determines whether an EKS Auto node IAM role is created"
   type        = bool
   default     = true
 }
 
-variable "fargate_pod_execution_role_name" {
-  description = "The IAM Role that provides permissions for the EKS Fargate Profile."
+variable "node_iam_role_name" {
+  description = "Name to use on the EKS Auto node IAM role created"
   type        = string
   default     = null
 }
 
-variable "cluster_service_ipv4_cidr" {
-  description = "service ipv4 cidr for the kubernetes cluster"
+variable "node_iam_role_use_name_prefix" {
+  description = "Determines whether the EKS Auto node IAM role name (`node_iam_role_name`) is used as a prefix"
+  type        = bool
+  default     = true
+}
+
+variable "node_iam_role_path" {
+  description = "The EKS Auto node IAM role path"
   type        = string
   default     = null
 }
 
-variable "cluster_egress_cidrs" {
-  description = "List of CIDR blocks that are permitted for cluster egress traffic."
-  type        = list(string)
-  default     = ["0.0.0.0/0"]
+variable "node_iam_role_description" {
+  description = "Description of the EKS Auto node IAM role"
+  type        = string
+  default     = null
 }
 
-variable "workers_egress_cidrs" {
-  description = "List of CIDR blocks that are permitted for workers egress traffic."
-  type        = list(string)
-  default     = ["0.0.0.0/0"]
+variable "node_iam_role_permissions_boundary" {
+  description = "ARN of the policy that is used to set the permissions boundary for the EKS Auto node IAM role"
+  type        = string
+  default     = null
 }
 
-variable "wait_for_cluster_timeout" {
-  description = "A timeout (in seconds) to wait for cluster to be available."
-  type        = number
-  default     = 300
+variable "node_iam_role_additional_policies" {
+  description = "Additional policies to be added to the EKS Auto node IAM role"
+  type        = map(string)
+  default     = {}
+}
+
+variable "node_iam_role_tags" {
+  description = "A map of additional tags to add to the EKS Auto node IAM role created"
+  type        = map(string)
+  default     = {}
+}
+
+################################################################################
+# Fargate
+################################################################################
+
+variable "fargate_profiles" {
+  description = "Map of Fargate Profile definitions to create"
+  type = map(object({
+    create = optional(bool)
+
+    # Fargate profile
+    name       = optional(string) # Will fall back to map key
+    subnet_ids = optional(list(string))
+    selectors = optional(list(object({
+      labels    = optional(map(string))
+      namespace = string
+    })))
+    timeouts = optional(object({
+      create = optional(string)
+      delete = optional(string)
+    }))
+
+    # IAM role
+    create_iam_role               = optional(bool)
+    iam_role_arn                  = optional(string)
+    iam_role_name                 = optional(string)
+    iam_role_use_name_prefix      = optional(bool)
+    iam_role_path                 = optional(string)
+    iam_role_description          = optional(string)
+    iam_role_permissions_boundary = optional(string)
+    iam_role_tags                 = optional(map(string))
+    iam_role_attach_cni_policy    = optional(bool)
+    iam_role_additional_policies  = optional(map(string))
+    create_iam_role_policy        = optional(bool)
+    iam_role_policy_statements = optional(list(object({
+      sid           = optional(string)
+      actions       = optional(list(string))
+      not_actions   = optional(list(string))
+      effect        = optional(string)
+      resources     = optional(list(string))
+      not_resources = optional(list(string))
+      principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      not_principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      condition = optional(list(object({
+        test     = string
+        values   = list(string)
+        variable = string
+      })))
+    })))
+    tags = optional(map(string))
+  }))
+  default = null
+}
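+
+# Illustrative sketch (not part of the module): a Fargate profile that selects
+# CoreDNS pods in `kube-system`. The label selector is an assumption for the
+# example.
+#
+# fargate_profiles = {
+#   kube_system = {
+#     selectors = [
+#       {
+#         namespace = "kube-system"
+#         labels    = { "k8s-app" = "kube-dns" }
+#       }
+#     ]
+#   }
+# }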
+
+################################################################################
+# Self Managed Node Group
+################################################################################
+
+variable "self_managed_node_groups" {
+  description = "Map of self-managed node group definitions to create"
+  type = map(object({
+    create             = optional(bool)
+    kubernetes_version = optional(string)
+
+    # Autoscaling Group
+    create_autoscaling_group         = optional(bool)
+    name                             = optional(string) # Will fall back to map key
+    use_name_prefix                  = optional(bool)
+    availability_zones               = optional(list(string))
+    subnet_ids                       = optional(list(string))
+    min_size                         = optional(number)
+    max_size                         = optional(number)
+    desired_size                     = optional(number)
+    desired_size_type                = optional(string)
+    capacity_rebalance               = optional(bool)
+    default_instance_warmup          = optional(number)
+    protect_from_scale_in            = optional(bool)
+    context                          = optional(string)
+    create_placement_group           = optional(bool)
+    placement_group                  = optional(string)
+    health_check_type                = optional(string)
+    health_check_grace_period        = optional(number)
+    ignore_failed_scaling_activities = optional(bool)
+    force_delete                     = optional(bool)
+    termination_policies             = optional(list(string))
+    suspended_processes              = optional(list(string))
+    max_instance_lifetime            = optional(number)
+    enabled_metrics                  = optional(list(string))
+    metrics_granularity              = optional(string)
+    initial_lifecycle_hooks = optional(list(object({
+      default_result          = optional(string)
+      heartbeat_timeout       = optional(number)
+      lifecycle_transition    = string
+      name                    = string
+      notification_metadata   = optional(string)
+      notification_target_arn = optional(string)
+      role_arn                = optional(string)
+    })))
+    instance_maintenance_policy = optional(object({
+      max_healthy_percentage = number
+      min_healthy_percentage = number
+    }))
+    instance_refresh = optional(object({
+      preferences = optional(object({
+        alarm_specification = optional(object({
+          alarms = optional(list(string))
+        }))
+        auto_rollback                = optional(bool)
+        checkpoint_delay             = optional(number)
+        checkpoint_percentages       = optional(list(number))
+        instance_warmup              = optional(number)
+        max_healthy_percentage       = optional(number)
+        min_healthy_percentage       = optional(number)
+        scale_in_protected_instances = optional(string)
+        skip_matching                = optional(bool)
+        standby_instances            = optional(string)
+      }))
+      strategy = optional(string)
+      triggers = optional(list(string))
+    }))
+    use_mixed_instances_policy = optional(bool)
+    mixed_instances_policy = optional(object({
+      instances_distribution = optional(object({
+        on_demand_allocation_strategy            = optional(string)
+        on_demand_base_capacity                  = optional(number)
+        on_demand_percentage_above_base_capacity = optional(number)
+        spot_allocation_strategy                 = optional(string)
+        spot_instance_pools                      = optional(number)
+        spot_max_price                           = optional(string)
+      }))
+      launch_template = object({
+        override = optional(list(object({
+          instance_requirements = optional(object({
+            accelerator_count = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            accelerator_manufacturers = optional(list(string))
+            accelerator_names         = optional(list(string))
+            accelerator_total_memory_mib = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            accelerator_types      = optional(list(string))
+            allowed_instance_types = optional(list(string))
+            bare_metal             = optional(string)
+            baseline_ebs_bandwidth_mbps = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            burstable_performance                                   = optional(string)
+            cpu_manufacturers                                       = optional(list(string))
+            excluded_instance_types                                 = optional(list(string))
+            instance_generations                                    = optional(list(string))
+            local_storage                                           = optional(string)
+            local_storage_types                                     = optional(list(string))
+            max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)
+            memory_gib_per_vcpu = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            memory_mib = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            network_bandwidth_gbps = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            network_interface_count = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            on_demand_max_price_percentage_over_lowest_price = optional(number)
+            require_hibernate_support                        = optional(bool)
+            spot_max_price_percentage_over_lowest_price      = optional(number)
+            total_local_storage_gb = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+            vcpu_count = optional(object({
+              max = optional(number)
+              min = optional(number)
+            }))
+          }))
+          instance_type = optional(string)
+          launch_template_specification = optional(object({
+            launch_template_id   = optional(string)
+            launch_template_name = optional(string)
+            version              = optional(string)
+          }))
+          weighted_capacity = optional(string)
+        })))
+      })
+    }))
+    timeouts = optional(object({
+      delete = optional(string)
+    }))
+    autoscaling_group_tags = optional(map(string))
+    # User data
+    ami_type                   = optional(string)
+    additional_cluster_dns_ips = optional(list(string))
+    pre_bootstrap_user_data    = optional(string)
+    post_bootstrap_user_data   = optional(string)
+    bootstrap_extra_args       = optional(string)
+    user_data_template_path    = optional(string)
+    cloudinit_pre_nodeadm = optional(list(object({
+      content      = string
+      content_type = optional(string)
+      filename     = optional(string)
+      merge_type   = optional(string)
+    })))
+    cloudinit_post_nodeadm = optional(list(object({
+      content      = string
+      content_type = optional(string)
+      filename     = optional(string)
+      merge_type   = optional(string)
+    })))
+    # Launch Template
+    create_launch_template                 = optional(bool)
+    use_custom_launch_template             = optional(bool)
+    launch_template_id                     = optional(string)
+    launch_template_name                   = optional(string) # Will fall back to map key
+    launch_template_use_name_prefix        = optional(bool)
+    launch_template_version                = optional(string)
+    launch_template_default_version        = optional(string)
+    update_launch_template_default_version = optional(bool)
+    launch_template_description            = optional(string)
+    launch_template_tags                   = optional(map(string))
+    tag_specifications                     = optional(list(string))
+    ebs_optimized                          = optional(bool)
+    ami_id                                 = optional(string)
+    instance_type                          = optional(string)
+    key_name                               = optional(string)
+    disable_api_termination                = optional(bool)
+    instance_initiated_shutdown_behavior   = optional(string)
+    kernel_id                              = optional(string)
+    ram_disk_id                            = optional(string)
+    block_device_mappings = optional(map(object({
+      device_name = optional(string)
+      ebs = optional(object({
+        delete_on_termination      = optional(bool)
+        encrypted                  = optional(bool)
+        iops                       = optional(number)
+        kms_key_id                 = optional(string)
+        snapshot_id                = optional(string)
+        throughput                 = optional(number)
+        volume_initialization_rate = optional(number)
+        volume_size                = optional(number)
+        volume_type                = optional(string)
+      }))
+      no_device    = optional(string)
+      virtual_name = optional(string)
+    })))
+    capacity_reservation_specification = optional(object({
+      capacity_reservation_preference = optional(string)
+      capacity_reservation_target = optional(object({
+        capacity_reservation_id                 = optional(string)
+        capacity_reservation_resource_group_arn = optional(string)
+      }))
+    }))
+    cpu_options = optional(object({
+      amd_sev_snp      = optional(string)
+      core_count       = optional(number)
+      threads_per_core = optional(number)
+    }))
+    credit_specification = optional(object({
+      cpu_credits = optional(string)
+    }))
+    enclave_options = optional(object({
+      enabled = optional(bool)
+    }))
+    instance_requirements = optional(object({
+      accelerator_count = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      accelerator_manufacturers = optional(list(string))
+      accelerator_names         = optional(list(string))
+      accelerator_total_memory_mib = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      accelerator_types      = optional(list(string))
+      allowed_instance_types = optional(list(string))
+      bare_metal             = optional(string)
+      baseline_ebs_bandwidth_mbps = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      burstable_performance                                   = optional(string)
+      cpu_manufacturers                                       = optional(list(string))
+      excluded_instance_types                                 = optional(list(string))
+      instance_generations                                    = optional(list(string))
+      local_storage                                           = optional(string)
+      local_storage_types                                     = optional(list(string))
+      max_spot_price_as_percentage_of_optimal_on_demand_price = optional(number)
+      memory_gib_per_vcpu = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      memory_mib = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      network_bandwidth_gbps = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      network_interface_count = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      on_demand_max_price_percentage_over_lowest_price = optional(number)
+      require_hibernate_support                        = optional(bool)
+      spot_max_price_percentage_over_lowest_price      = optional(number)
+      total_local_storage_gb = optional(object({
+        max = optional(number)
+        min = optional(number)
+      }))
+      vcpu_count = optional(object({
+        max = optional(number)
+        min = number
+      }))
+    }))
+    instance_market_options = optional(object({
+      market_type = optional(string)
+      spot_options = optional(object({
+        block_duration_minutes         = optional(number)
+        instance_interruption_behavior = optional(string)
+        max_price                      = optional(string)
+        spot_instance_type             = optional(string)
+        valid_until                    = optional(string)
+      }))
+    }))
+    license_specifications = optional(list(object({
+      license_configuration_arn = string
+    })))
+    metadata_options = optional(object({
+      http_endpoint               = optional(string)
+      http_protocol_ipv6          = optional(string)
+      http_put_response_hop_limit = optional(number)
+      http_tokens                 = optional(string)
+      instance_metadata_tags      = optional(string)
+    }))
+    enable_monitoring  = optional(bool)
+    enable_efa_support = optional(bool)
+    enable_efa_only    = optional(bool)
+    efa_indices        = optional(list(string))
+    network_interfaces = optional(list(object({
+      associate_carrier_ip_address = optional(bool)
+      associate_public_ip_address  = optional(bool)
+      connection_tracking_specification = optional(object({
+        tcp_established_timeout = optional(number)
+        udp_stream_timeout      = optional(number)
+        udp_timeout             = optional(number)
+      }))
+      delete_on_termination = optional(bool)
+      description           = optional(string)
+      device_index          = optional(number)
+      ena_srd_specification = optional(object({
+        ena_srd_enabled = optional(bool)
+        ena_srd_udp_specification = optional(object({
+          ena_srd_udp_enabled = optional(bool)
+        }))
+      }))
+      interface_type       = optional(string)
+      ipv4_address_count   = optional(number)
+      ipv4_addresses       = optional(list(string))
+      ipv4_prefix_count    = optional(number)
+      ipv4_prefixes        = optional(list(string))
+      ipv6_address_count   = optional(number)
+      ipv6_addresses       = optional(list(string))
+      ipv6_prefix_count    = optional(number)
+      ipv6_prefixes        = optional(list(string))
+      network_card_index   = optional(number)
+      network_interface_id = optional(string)
+      primary_ipv6         = optional(bool)
+      private_ip_address   = optional(string)
+      security_groups      = optional(list(string))
+      subnet_id            = optional(string)
+    })))
+    placement = optional(object({
+      affinity                = optional(string)
+      availability_zone       = optional(string)
+      group_name              = optional(string)
+      host_id                 = optional(string)
+      host_resource_group_arn = optional(string)
+      partition_number        = optional(number)
+      spread_domain           = optional(string)
+      tenancy                 = optional(string)
+    }))
+    maintenance_options = optional(object({
+      auto_recovery = optional(string)
+    }))
+    private_dns_name_options = optional(object({
+      enable_resource_name_dns_aaaa_record = optional(bool)
+      enable_resource_name_dns_a_record    = optional(bool)
+      hostname_type                        = optional(string)
+    }))
+    # IAM role
+    create_iam_instance_profile   = optional(bool)
+    iam_instance_profile_arn      = optional(string)
+    iam_role_name                 = optional(string)
+    iam_role_use_name_prefix      = optional(bool)
+    iam_role_path                 = optional(string)
+    iam_role_description          = optional(string)
+    iam_role_permissions_boundary = optional(string)
+    iam_role_tags                 = optional(map(string))
+    iam_role_attach_cni_policy    = optional(bool)
+    iam_role_additional_policies  = optional(map(string))
+    create_iam_role_policy        = optional(bool)
+    iam_role_policy_statements = optional(list(object({
+      sid           = optional(string)
+      actions       = optional(list(string))
+      not_actions   = optional(list(string))
+      effect        = optional(string)
+      resources     = optional(list(string))
+      not_resources = optional(list(string))
+      principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      not_principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      condition = optional(list(object({
+        test     = string
+        values   = list(string)
+        variable = string
+      })))
+    })))
+    # Access entry
+    create_access_entry = optional(bool)
+    iam_role_arn        = optional(string)
+    # Security group
+    vpc_security_group_ids                = optional(list(string), [])
+    attach_cluster_primary_security_group = optional(bool, false)
+    create_security_group                 = optional(bool)
+    security_group_name                   = optional(string)
+    security_group_use_name_prefix        = optional(bool)
+    security_group_description            = optional(string)
+    security_group_ingress_rules = optional(map(object({
+      name                         = optional(string)
+      cidr_ipv4                    = optional(string)
+      cidr_ipv6                    = optional(string)
+      description                  = optional(string)
+      from_port                    = optional(string)
+      ip_protocol                  = optional(string)
+      prefix_list_id               = optional(string)
+      referenced_security_group_id = optional(string)
+      self                         = optional(bool)
+      tags                         = optional(map(string))
+      to_port                      = optional(string)
+    })))
+    security_group_egress_rules = optional(map(object({
+      name                         = optional(string)
+      cidr_ipv4                    = optional(string)
+      cidr_ipv6                    = optional(string)
+      description                  = optional(string)
+      from_port                    = optional(string)
+      ip_protocol                  = optional(string)
+      prefix_list_id               = optional(string)
+      referenced_security_group_id = optional(string)
+      self                         = optional(bool)
+      tags                         = optional(map(string))
+      to_port                      = optional(string)
+    })))
+    security_group_tags = optional(map(string))
+
+    tags = optional(map(string))
+  }))
+  default = null
+}
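+
+# Illustrative sketch (not part of the module): a small self-managed node
+# group. The AMI type, instance type, and sizes are assumptions for the example.
+#
+# self_managed_node_groups = {
+#   default = {
+#     ami_type      = "AL2023_x86_64_STANDARD"
+#     instance_type = "m6i.large"
+#     min_size      = 1
+#     max_size      = 3
+#     desired_size  = 2
+#   }
+# }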
+
+################################################################################
+# EKS Managed Node Group
+################################################################################
+
+variable "eks_managed_node_groups" {
+  description = "Map of EKS managed node group definitions to create"
+  type = map(object({
+    create             = optional(bool)
+    kubernetes_version = optional(string)
+
+    # EKS Managed Node Group
+    name                           = optional(string) # Will fall back to map key
+    use_name_prefix                = optional(bool)
+    subnet_ids                     = optional(list(string))
+    min_size                       = optional(number)
+    max_size                       = optional(number)
+    desired_size                   = optional(number)
+    ami_id                         = optional(string)
+    ami_type                       = optional(string)
+    ami_release_version            = optional(string)
+    use_latest_ami_release_version = optional(bool)
+    capacity_type                  = optional(string)
+    disk_size                      = optional(number)
+    force_update_version           = optional(bool)
+    instance_types                 = optional(list(string))
+    labels                         = optional(map(string))
+    node_repair_config = optional(object({
+      enabled = optional(bool)
+    }))
+    remote_access = optional(object({
+      ec2_ssh_key               = optional(string)
+      source_security_group_ids = optional(list(string))
+    }))
+    taints = optional(map(object({
+      key    = string
+      value  = optional(string)
+      effect = string
+    })))
+    update_config = optional(object({
+      max_unavailable            = optional(number)
+      max_unavailable_percentage = optional(number)
+    }))
+    timeouts = optional(object({
+      create = optional(string)
+      update = optional(string)
+      delete = optional(string)
+    }))
+    # User data
+    enable_bootstrap_user_data = optional(bool)
+    pre_bootstrap_user_data    = optional(string)
+    post_bootstrap_user_data   = optional(string)
+    bootstrap_extra_args       = optional(string)
+    user_data_template_path    = optional(string)
+    cloudinit_pre_nodeadm = optional(list(object({
+      content      = string
+      content_type = optional(string)
+      filename     = optional(string)
+      merge_type   = optional(string)
+    })))
+    cloudinit_post_nodeadm = optional(list(object({
+      content      = string
+      content_type = optional(string)
+      filename     = optional(string)
+      merge_type   = optional(string)
+    })))
+    # Launch Template
+    create_launch_template                 = optional(bool)
+    use_custom_launch_template             = optional(bool)
+    launch_template_id                     = optional(string)
+    launch_template_name                   = optional(string) # Will fall back to map key
+    launch_template_use_name_prefix        = optional(bool)
+    launch_template_version                = optional(string)
+    launch_template_default_version        = optional(string)
+    update_launch_template_default_version = optional(bool)
+    launch_template_description            = optional(string)
+    launch_template_tags                   = optional(map(string))
+    tag_specifications                     = optional(list(string))
+    ebs_optimized                          = optional(bool)
+    key_name                               = optional(string)
+    disable_api_termination                = optional(bool)
+    kernel_id                              = optional(string)
+    ram_disk_id                            = optional(string)
+    block_device_mappings = optional(map(object({
+      device_name = optional(string)
+      ebs = optional(object({
+        delete_on_termination      = optional(bool)
+        encrypted                  = optional(bool)
+        iops                       = optional(number)
+        kms_key_id                 = optional(string)
+        snapshot_id                = optional(string)
+        throughput                 = optional(number)
+        volume_initialization_rate = optional(number)
+        volume_size                = optional(number)
+        volume_type                = optional(string)
+      }))
+      no_device    = optional(string)
+      virtual_name = optional(string)
+    })))
+    capacity_reservation_specification = optional(object({
+      capacity_reservation_preference = optional(string)
+      capacity_reservation_target = optional(object({
+        capacity_reservation_id                 = optional(string)
+        capacity_reservation_resource_group_arn = optional(string)
+      }))
+    }))
+    cpu_options = optional(object({
+      amd_sev_snp      = optional(string)
+      core_count       = optional(number)
+      threads_per_core = optional(number)
+    }))
+    credit_specification = optional(object({
+      cpu_credits = optional(string)
+    }))
+    enclave_options = optional(object({
+      enabled = optional(bool)
+    }))
+    instance_market_options = optional(object({
+      market_type = optional(string)
+      spot_options = optional(object({
+        block_duration_minutes         = optional(number)
+        instance_interruption_behavior = optional(string)
+        max_price                      = optional(string)
+        spot_instance_type             = optional(string)
+        valid_until                    = optional(string)
+      }))
+    }))
+    license_specifications = optional(list(object({
+      license_configuration_arn = string
+    })))
+    metadata_options = optional(object({
+      http_endpoint               = optional(string)
+      http_protocol_ipv6          = optional(string)
+      http_put_response_hop_limit = optional(number)
+      http_tokens                 = optional(string)
+      instance_metadata_tags      = optional(string)
+    }))
+    enable_monitoring      = optional(bool)
+    enable_efa_support     = optional(bool)
+    enable_efa_only        = optional(bool)
+    efa_indices            = optional(list(string))
+    create_placement_group = optional(bool)
+    placement = optional(object({
+      affinity                = optional(string)
+      availability_zone       = optional(string)
+      group_name              = optional(string)
+      host_id                 = optional(string)
+      host_resource_group_arn = optional(string)
+      partition_number        = optional(number)
+      spread_domain           = optional(string)
+      tenancy                 = optional(string)
+    }))
+    network_interfaces = optional(list(object({
+      associate_carrier_ip_address = optional(bool)
+      associate_public_ip_address  = optional(bool)
+      connection_tracking_specification = optional(object({
+        tcp_established_timeout = optional(number)
+        udp_stream_timeout      = optional(number)
+        udp_timeout             = optional(number)
+      }))
+      delete_on_termination = optional(bool)
+      description           = optional(string)
+      device_index          = optional(number)
+      ena_srd_specification = optional(object({
+        ena_srd_enabled = optional(bool)
+        ena_srd_udp_specification = optional(object({
+          ena_srd_udp_enabled = optional(bool)
+        }))
+      }))
+      interface_type       = optional(string)
+      ipv4_address_count   = optional(number)
+      ipv4_addresses       = optional(list(string))
+      ipv4_prefix_count    = optional(number)
+      ipv4_prefixes        = optional(list(string))
+      ipv6_address_count   = optional(number)
+      ipv6_addresses       = optional(list(string))
+      ipv6_prefix_count    = optional(number)
+      ipv6_prefixes        = optional(list(string))
+      network_card_index   = optional(number)
+      network_interface_id = optional(string)
+      primary_ipv6         = optional(bool)
+      private_ip_address   = optional(string)
+      security_groups      = optional(list(string), [])
+      subnet_id            = optional(string)
+    })))
+    maintenance_options = optional(object({
+      auto_recovery = optional(string)
+    }))
+    private_dns_name_options = optional(object({
+      enable_resource_name_dns_aaaa_record = optional(bool)
+      enable_resource_name_dns_a_record    = optional(bool)
+      hostname_type                        = optional(string)
+    }))
+    # IAM role
+    create_iam_role               = optional(bool)
+    iam_role_arn                  = optional(string)
+    iam_role_name                 = optional(string)
+    iam_role_use_name_prefix      = optional(bool)
+    iam_role_path                 = optional(string)
+    iam_role_description          = optional(string)
+    iam_role_permissions_boundary = optional(string)
+    iam_role_tags                 = optional(map(string))
+    iam_role_attach_cni_policy    = optional(bool)
+    iam_role_additional_policies  = optional(map(string))
+    create_iam_role_policy        = optional(bool)
+    iam_role_policy_statements = optional(list(object({
+      sid           = optional(string)
+      actions       = optional(list(string))
+      not_actions   = optional(list(string))
+      effect        = optional(string)
+      resources     = optional(list(string))
+      not_resources = optional(list(string))
+      principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      not_principals = optional(list(object({
+        type        = string
+        identifiers = list(string)
+      })))
+      condition = optional(list(object({
+        test     = string
+        values   = list(string)
+        variable = string
+      })))
+    })))
+    # Security group
+    vpc_security_group_ids                = optional(list(string), [])
+    attach_cluster_primary_security_group = optional(bool, false)
+    cluster_primary_security_group_id     = optional(string)
+    create_security_group                 = optional(bool)
+    security_group_name                   = optional(string)
+    security_group_use_name_prefix        = optional(bool)
+    security_group_description            = optional(string)
+    security_group_ingress_rules = optional(map(object({
+      name                         = optional(string)
+      cidr_ipv4                    = optional(string)
+      cidr_ipv6                    = optional(string)
+      description                  = optional(string)
+      from_port                    = optional(string)
+      ip_protocol                  = optional(string)
+      prefix_list_id               = optional(string)
+      referenced_security_group_id = optional(string)
+      self                         = optional(bool)
+      tags                         = optional(map(string))
+      to_port                      = optional(string)
+    })))
+    security_group_egress_rules = optional(map(object({
+      name                         = optional(string)
+      cidr_ipv4                    = optional(string)
+      cidr_ipv6                    = optional(string)
+      description                  = optional(string)
+      from_port                    = optional(string)
+      ip_protocol                  = optional(string)
+      prefix_list_id               = optional(string)
+      referenced_security_group_id = optional(string)
+      self                         = optional(bool)
+      tags                         = optional(map(string))
+      to_port                      = optional(string)
+    })), {})
+    security_group_tags = optional(map(string))
+
+    tags = optional(map(string))
+  }))
+  default = null
+}
+
+variable "putin_khuylo" {
+  description = "Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo!"
+  type        = bool
+  default     = true
 }
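
For illustration, a hedged sketch of a value a caller might supply for the node-group map variable closed above (the enclosing map-of-objects shape is inferred from the closing braces; the map key, security group ID, CIDR, and tag values are placeholders, not taken from this diff):

    {
      default = {
        # IAM role attributes from the object type above
        create_iam_role            = true
        iam_role_use_name_prefix   = true
        iam_role_attach_cni_policy = true

        # Security group attributes from the object type above
        vpc_security_group_ids                = ["sg-0123456789abcdef0"]
        attach_cluster_primary_security_group = false
        security_group_ingress_rules = {
          ssh = {
            description = "SSH from the management CIDR"
            cidr_ipv4   = "10.0.0.0/16"
            ip_protocol = "tcp"
            from_port   = "22"
            to_port     = "22"
          }
        }

        tags = {
          Environment = "dev"
        }
      }
    }
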
diff --git a/versions.tf b/versions.tf
index db42ebeb2f..71efea2e03 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,13 +1,18 @@
 terraform {
-  required_version = ">= 0.13.1"
+  required_version = ">= 1.5.7"
 
   required_providers {
-    aws        = ">= 3.40.0"
-    local      = ">= 1.4"
-    kubernetes = ">= 1.11.1"
-    http = {
-      source  = "terraform-aws-modules/http"
-      version = ">= 2.4.1"
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 6.0"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = ">= 4.0"
+    }
+    time = {
+      source  = "hashicorp/time"
+      version = ">= 0.9"
     }
   }
 }
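
A hedged sketch of a consumer-side root configuration that satisfies the tightened constraints above (Terraform >= 1.5.7; hashicorp/aws >= 6.0, hashicorp/tls >= 4.0, hashicorp/time >= 0.9). The region is a placeholder, and the module inputs are deliberately omitted rather than guessed:

    terraform {
      required_version = ">= 1.5.7"

      required_providers {
        aws = {
          source  = "hashicorp/aws"
          version = ">= 6.0"
        }
      }
    }

    provider "aws" {
      region = "us-east-1" # placeholder region
    }

    module "eks" {
      source = "terraform-aws-modules/eks/aws"
      # version pin and required inputs omitted; pin to a release that
      # carries the provider constraints above
    }
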
diff --git a/workers.tf b/workers.tf
deleted file mode 100644
index 31a2ffd568..0000000000
--- a/workers.tf
+++ /dev/null
@@ -1,501 +0,0 @@
-# Worker Groups using Launch Configurations
-
-resource "aws_autoscaling_group" "workers" {
-  count = var.create_eks ? local.worker_group_count : 0
-  name_prefix = join(
-    "-",
-    compact(
-      [
-        coalescelist(aws_eks_cluster.this[*].name, [""])[0],
-        lookup(var.worker_groups[count.index], "name", count.index)
-      ]
-    )
-  )
-  desired_capacity = lookup(
-    var.worker_groups[count.index],
-    "asg_desired_capacity",
-    local.workers_group_defaults["asg_desired_capacity"],
-  )
-  max_size = lookup(
-    var.worker_groups[count.index],
-    "asg_max_size",
-    local.workers_group_defaults["asg_max_size"],
-  )
-  min_size = lookup(
-    var.worker_groups[count.index],
-    "asg_min_size",
-    local.workers_group_defaults["asg_min_size"],
-  )
-  force_delete = lookup(
-    var.worker_groups[count.index],
-    "asg_force_delete",
-    local.workers_group_defaults["asg_force_delete"],
-  )
-  target_group_arns = lookup(
-    var.worker_groups[count.index],
-    "target_group_arns",
-    local.workers_group_defaults["target_group_arns"]
-  )
-  load_balancers = lookup(
-    var.worker_groups[count.index],
-    "load_balancers",
-    local.workers_group_defaults["load_balancers"]
-  )
-  service_linked_role_arn = lookup(
-    var.worker_groups[count.index],
-    "service_linked_role_arn",
-    local.workers_group_defaults["service_linked_role_arn"],
-  )
-  launch_configuration = aws_launch_configuration.workers.*.id[count.index]
-  vpc_zone_identifier = lookup(
-    var.worker_groups[count.index],
-    "subnets",
-    local.workers_group_defaults["subnets"]
-  )
-  protect_from_scale_in = lookup(
-    var.worker_groups[count.index],
-    "protect_from_scale_in",
-    local.workers_group_defaults["protect_from_scale_in"],
-  )
-  suspended_processes = lookup(
-    var.worker_groups[count.index],
-    "suspended_processes",
-    local.workers_group_defaults["suspended_processes"]
-  )
-  enabled_metrics = lookup(
-    var.worker_groups[count.index],
-    "enabled_metrics",
-    local.workers_group_defaults["enabled_metrics"]
-  )
-  placement_group = lookup(
-    var.worker_groups[count.index],
-    "placement_group",
-    local.workers_group_defaults["placement_group"],
-  )
-  termination_policies = lookup(
-    var.worker_groups[count.index],
-    "termination_policies",
-    local.workers_group_defaults["termination_policies"]
-  )
-  max_instance_lifetime = lookup(
-    var.worker_groups[count.index],
-    "max_instance_lifetime",
-    local.workers_group_defaults["max_instance_lifetime"],
-  )
-  default_cooldown = lookup(
-    var.worker_groups[count.index],
-    "default_cooldown",
-    local.workers_group_defaults["default_cooldown"]
-  )
-  health_check_type = lookup(
-    var.worker_groups[count.index],
-    "health_check_type",
-    local.workers_group_defaults["health_check_type"]
-  )
-  health_check_grace_period = lookup(
-    var.worker_groups[count.index],
-    "health_check_grace_period",
-    local.workers_group_defaults["health_check_grace_period"]
-  )
-  capacity_rebalance = lookup(
-    var.worker_groups[count.index],
-    "capacity_rebalance",
-    local.workers_group_defaults["capacity_rebalance"]
-  )
-
-  dynamic "initial_lifecycle_hook" {
-    for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
-    content {
-      name                    = initial_lifecycle_hook.value["name"]
-      lifecycle_transition    = initial_lifecycle_hook.value["lifecycle_transition"]
-      notification_metadata   = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
-      heartbeat_timeout       = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
-      notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
-      role_arn                = lookup(initial_lifecycle_hook.value, "role_arn", null)
-      default_result          = lookup(initial_lifecycle_hook.value, "default_result", null)
-    }
-  }
-
-  dynamic "warm_pool" {
-    for_each = lookup(var.worker_groups[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups[count.index], "warm_pool")] : []
-
-    content {
-      pool_state                  = lookup(warm_pool.value, "pool_state", null)
-      min_size                    = lookup(warm_pool.value, "min_size", null)
-      max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
-    }
-  }
-
-  dynamic "tag" {
-    for_each = concat(
-      [
-        {
-          "key"                 = "Name"
-          "value"               = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg"
-          "propagate_at_launch" = true
-        },
-        {
-          "key"                 = "kubernetes.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}"
-          "value"               = "owned"
-          "propagate_at_launch" = true
-        },
-        {
-          "key"                 = "k8s.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}"
-          "value"               = "owned"
-          "propagate_at_launch" = true
-        },
-      ],
-      [
-        for tag_key, tag_value in var.tags :
-        {
-          "key"                 = tag_key,
-          "value"               = tag_value,
-          "propagate_at_launch" = "true"
-        }
-        if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
-      ],
-      lookup(
-        var.worker_groups[count.index],
-        "tags",
-        local.workers_group_defaults["tags"]
-      )
-    )
-    content {
-      key                 = tag.value.key
-      value               = tag.value.value
-      propagate_at_launch = tag.value.propagate_at_launch
-    }
-  }
-
-  # logic duplicated in workers_launch_template.tf
-  dynamic "instance_refresh" {
-    for_each = lookup(var.worker_groups[count.index],
-      "instance_refresh_enabled",
-    local.workers_group_defaults["instance_refresh_enabled"]) ? [1] : []
-    content {
-      strategy = lookup(
-        var.worker_groups[count.index], "instance_refresh_strategy",
-        local.workers_group_defaults["instance_refresh_strategy"]
-      )
-      preferences {
-        instance_warmup = lookup(
-          var.worker_groups[count.index], "instance_refresh_instance_warmup",
-          local.workers_group_defaults["instance_refresh_instance_warmup"]
-        )
-        min_healthy_percentage = lookup(
-          var.worker_groups[count.index], "instance_refresh_min_healthy_percentage",
-          local.workers_group_defaults["instance_refresh_min_healthy_percentage"]
-        )
-      }
-      triggers = lookup(
-        var.worker_groups[count.index], "instance_refresh_triggers",
-        local.workers_group_defaults["instance_refresh_triggers"]
-      )
-    }
-  }
-
-  lifecycle {
-    create_before_destroy = true
-    ignore_changes        = [desired_capacity]
-  }
-}
-
-resource "aws_launch_configuration" "workers" {
-  count       = var.create_eks ? local.worker_group_count : 0
-  name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}"
-  associate_public_ip_address = lookup(
-    var.worker_groups[count.index],
-    "public_ip",
-    local.workers_group_defaults["public_ip"],
-  )
-  security_groups = flatten([
-    local.worker_security_group_id,
-    var.worker_additional_security_group_ids,
-    lookup(
-      var.worker_groups[count.index],
-      "additional_security_group_ids",
-      local.workers_group_defaults["additional_security_group_ids"]
-    )
-  ])
-  iam_instance_profile = coalescelist(
-    aws_iam_instance_profile.workers.*.id,
-    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name,
-  )[count.index]
-  image_id = lookup(
-    var.worker_groups[count.index],
-    "ami_id",
-    lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
-  )
-  instance_type = lookup(
-    var.worker_groups[count.index],
-    "instance_type",
-    local.workers_group_defaults["instance_type"],
-  )
-  key_name = lookup(
-    var.worker_groups[count.index],
-    "key_name",
-    local.workers_group_defaults["key_name"],
-  )
-  user_data_base64 = base64encode(local.userdata_rendered[count.index])
-  ebs_optimized = lookup(
-    var.worker_groups[count.index],
-    "ebs_optimized",
-    !contains(
-      local.ebs_optimized_not_supported,
-      lookup(
-        var.worker_groups[count.index],
-        "instance_type",
-        local.workers_group_defaults["instance_type"]
-      )
-    )
-  )
-  enable_monitoring = lookup(
-    var.worker_groups[count.index],
-    "enable_monitoring",
-    local.workers_group_defaults["enable_monitoring"],
-  )
-  spot_price = lookup(
-    var.worker_groups[count.index],
-    "spot_price",
-    local.workers_group_defaults["spot_price"],
-  )
-  placement_tenancy = lookup(
-    var.worker_groups[count.index],
-    "placement_tenancy",
-    local.workers_group_defaults["placement_tenancy"],
-  )
-
-  metadata_options {
-    http_endpoint = lookup(
-      var.worker_groups[count.index],
-      "metadata_http_endpoint",
-      local.workers_group_defaults["metadata_http_endpoint"],
-    )
-    http_tokens = lookup(
-      var.worker_groups[count.index],
-      "metadata_http_tokens",
-      local.workers_group_defaults["metadata_http_tokens"],
-    )
-    http_put_response_hop_limit = lookup(
-      var.worker_groups[count.index],
-      "metadata_http_put_response_hop_limit",
-      local.workers_group_defaults["metadata_http_put_response_hop_limit"],
-    )
-  }
-
-  root_block_device {
-    encrypted = lookup(
-      var.worker_groups[count.index],
-      "root_encrypted",
-      local.workers_group_defaults["root_encrypted"],
-    )
-    volume_size = lookup(
-      var.worker_groups[count.index],
-      "root_volume_size",
-      local.workers_group_defaults["root_volume_size"],
-    )
-    volume_type = lookup(
-      var.worker_groups[count.index],
-      "root_volume_type",
-      local.workers_group_defaults["root_volume_type"],
-    )
-    iops = lookup(
-      var.worker_groups[count.index],
-      "root_iops",
-      local.workers_group_defaults["root_iops"],
-    )
-    delete_on_termination = true
-  }
-
-  dynamic "ebs_block_device" {
-    for_each = lookup(var.worker_groups[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
-
-    content {
-      device_name = ebs_block_device.value.block_device_name
-      volume_size = lookup(
-        ebs_block_device.value,
-        "volume_size",
-        local.workers_group_defaults["root_volume_size"],
-      )
-      volume_type = lookup(
-        ebs_block_device.value,
-        "volume_type",
-        local.workers_group_defaults["root_volume_type"],
-      )
-      iops = lookup(
-        ebs_block_device.value,
-        "iops",
-        local.workers_group_defaults["root_iops"],
-      )
-      encrypted = lookup(
-        ebs_block_device.value,
-        "encrypted",
-        local.workers_group_defaults["root_encrypted"],
-      )
-      delete_on_termination = lookup(ebs_block_device.value, "delete_on_termination", true)
-    }
-  }
-
-  lifecycle {
-    create_before_destroy = true
-  }
-
-  # Prevent premature access of security group roles and policies by pods that
-  # require permissions on create/destroy that depend on workers.
-  depends_on = [
-    aws_security_group_rule.workers_egress_internet,
-    aws_security_group_rule.workers_ingress_self,
-    aws_security_group_rule.workers_ingress_cluster,
-    aws_security_group_rule.workers_ingress_cluster_kubelet,
-    aws_security_group_rule.workers_ingress_cluster_https,
-    aws_security_group_rule.workers_ingress_cluster_primary,
-    aws_security_group_rule.cluster_primary_ingress_workers,
-    aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
-    aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
-    aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly,
-    aws_iam_role_policy_attachment.workers_additional_policies
-  ]
-}
-
-resource "aws_security_group" "workers" {
-  count       = var.worker_create_security_group && var.create_eks ? 1 : 0
-  name_prefix = var.cluster_name
-  description = "Security group for all nodes in the cluster."
-  vpc_id      = var.vpc_id
-  tags = merge(
-    var.tags,
-    {
-      "Name"                                      = "${var.cluster_name}-eks_worker_sg"
-      "kubernetes.io/cluster/${var.cluster_name}" = "owned"
-    },
-  )
-}
-
-resource "aws_security_group_rule" "workers_egress_internet" {
-  count             = var.worker_create_security_group && var.create_eks ? 1 : 0
-  description       = "Allow nodes all egress to the Internet."
-  protocol          = "-1"
-  security_group_id = local.worker_security_group_id
-  cidr_blocks       = var.workers_egress_cidrs
-  from_port         = 0
-  to_port           = 0
-  type              = "egress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_self" {
-  count                    = var.worker_create_security_group && var.create_eks ? 1 : 0
-  description              = "Allow node to communicate with each other."
-  protocol                 = "-1"
-  security_group_id        = local.worker_security_group_id
-  source_security_group_id = local.worker_security_group_id
-  from_port                = 0
-  to_port                  = 65535
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster" {
-  count                    = var.worker_create_security_group && var.create_eks ? 1 : 0
-  description              = "Allow workers pods to receive communication from the cluster control plane."
-  protocol                 = "tcp"
-  security_group_id        = local.worker_security_group_id
-  source_security_group_id = local.cluster_security_group_id
-  from_port                = var.worker_sg_ingress_from_port
-  to_port                  = 65535
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
-  count                    = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
-  description              = "Allow workers Kubelets to receive communication from the cluster control plane."
-  protocol                 = "tcp"
-  security_group_id        = local.worker_security_group_id
-  source_security_group_id = local.cluster_security_group_id
-  from_port                = 10250
-  to_port                  = 10250
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_https" {
-  count                    = var.worker_create_security_group && var.create_eks ? 1 : 0
-  description              = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
-  protocol                 = "tcp"
-  security_group_id        = local.worker_security_group_id
-  source_security_group_id = local.cluster_security_group_id
-  from_port                = 443
-  to_port                  = 443
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
-  count                    = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
-  description              = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
-  protocol                 = "all"
-  security_group_id        = local.worker_security_group_id
-  source_security_group_id = local.cluster_primary_security_group_id
-  from_port                = 0
-  to_port                  = 65535
-  type                     = "ingress"
-}
-
-resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
-  count                    = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
-  description              = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
-  protocol                 = "all"
-  security_group_id        = local.cluster_primary_security_group_id
-  source_security_group_id = local.worker_security_group_id
-  from_port                = 0
-  to_port                  = 65535
-  type                     = "ingress"
-}
-
-resource "aws_iam_role" "workers" {
-  count                 = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-  name_prefix           = var.workers_role_name != "" ? null : coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-  name                  = var.workers_role_name != "" ? var.workers_role_name : null
-  assume_role_policy    = data.aws_iam_policy_document.workers_assume_role_policy.json
-  permissions_boundary  = var.permissions_boundary
-  path                  = var.iam_path
-  force_detach_policies = true
-  tags                  = var.tags
-}
-
-resource "aws_iam_instance_profile" "workers" {
-  count       = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_count : 0
-  name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-  role = lookup(
-    var.worker_groups[count.index],
-    "iam_role_id",
-    local.default_iam_role_id,
-  )
-
-  path = var.iam_path
-  tags = var.tags
-
-  lifecycle {
-    create_before_destroy = true
-  }
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
-  count      = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
-  role       = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
-  count      = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
-  role       = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
-  count      = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
-  policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
-  role       = aws_iam_role.workers[0].name
-}
-
-resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
-  count      = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0
-  role       = aws_iam_role.workers[0].name
-  policy_arn = var.workers_additional_policies[count.index]
-}
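
For context on the resources deleted above: workers.tf read its settings from entries of var.worker_groups via lookup(). A hedged reconstruction of one such entry, restricted to keys that appear in the lookup() calls above (all values are placeholders):

    worker_groups = [
      {
        name                 = "workers-1" # placeholder
        instance_type        = "m5.large"  # placeholder
        asg_min_size         = 1
        asg_desired_capacity = 2
        asg_max_size         = 3
        key_name             = "my-key"    # placeholder
        public_ip            = false
        spot_price           = "0.05"      # placeholder
        root_volume_size     = 50
        subnets              = ["subnet-0123456789abcdef0"] # placeholder
      }
    ]
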
diff --git a/workers_launch_template.tf b/workers_launch_template.tf
deleted file mode 100644
index 6e14b7dcb0..0000000000
--- a/workers_launch_template.tf
+++ /dev/null
@@ -1,608 +0,0 @@
-# Worker Groups using Launch Templates
-
-resource "aws_autoscaling_group" "workers_launch_template" {
-  count = var.create_eks ? local.worker_group_launch_template_count : 0
-  name_prefix = join(
-    "-",
-    compact(
-      [
-        coalescelist(aws_eks_cluster.this[*].name, [""])[0],
-        lookup(var.worker_groups_launch_template[count.index], "name", count.index)
-      ]
-    )
-  )
-  desired_capacity = lookup(
-    var.worker_groups_launch_template[count.index],
-    "asg_desired_capacity",
-    local.workers_group_defaults["asg_desired_capacity"],
-  )
-  max_size = lookup(
-    var.worker_groups_launch_template[count.index],
-    "asg_max_size",
-    local.workers_group_defaults["asg_max_size"],
-  )
-  min_size = lookup(
-    var.worker_groups_launch_template[count.index],
-    "asg_min_size",
-    local.workers_group_defaults["asg_min_size"],
-  )
-  force_delete = lookup(
-    var.worker_groups_launch_template[count.index],
-    "asg_force_delete",
-    local.workers_group_defaults["asg_force_delete"],
-  )
-  target_group_arns = lookup(
-    var.worker_groups_launch_template[count.index],
-    "target_group_arns",
-    local.workers_group_defaults["target_group_arns"]
-  )
-  load_balancers = lookup(
-    var.worker_groups_launch_template[count.index],
-    "load_balancers",
-    local.workers_group_defaults["load_balancers"]
-  )
-  service_linked_role_arn = lookup(
-    var.worker_groups_launch_template[count.index],
-    "service_linked_role_arn",
-    local.workers_group_defaults["service_linked_role_arn"],
-  )
-  vpc_zone_identifier = lookup(
-    var.worker_groups_launch_template[count.index],
-    "subnets",
-    local.workers_group_defaults["subnets"]
-  )
-  protect_from_scale_in = lookup(
-    var.worker_groups_launch_template[count.index],
-    "protect_from_scale_in",
-    local.workers_group_defaults["protect_from_scale_in"],
-  )
-  suspended_processes = lookup(
-    var.worker_groups_launch_template[count.index],
-    "suspended_processes",
-    local.workers_group_defaults["suspended_processes"]
-  )
-  enabled_metrics = lookup(
-    var.worker_groups_launch_template[count.index],
-    "enabled_metrics",
-    local.workers_group_defaults["enabled_metrics"]
-  )
-  placement_group = lookup(
-    var.worker_groups_launch_template[count.index],
-    "placement_group",
-    local.workers_group_defaults["placement_group"],
-  )
-  termination_policies = lookup(
-    var.worker_groups_launch_template[count.index],
-    "termination_policies",
-    local.workers_group_defaults["termination_policies"]
-  )
-  max_instance_lifetime = lookup(
-    var.worker_groups_launch_template[count.index],
-    "max_instance_lifetime",
-    local.workers_group_defaults["max_instance_lifetime"],
-  )
-  default_cooldown = lookup(
-    var.worker_groups_launch_template[count.index],
-    "default_cooldown",
-    local.workers_group_defaults["default_cooldown"]
-  )
-  health_check_type = lookup(
-    var.worker_groups_launch_template[count.index],
-    "health_check_type",
-    local.workers_group_defaults["health_check_type"]
-  )
-  health_check_grace_period = lookup(
-    var.worker_groups_launch_template[count.index],
-    "health_check_grace_period",
-    local.workers_group_defaults["health_check_grace_period"]
-  )
-  capacity_rebalance = lookup(
-    var.worker_groups_launch_template[count.index],
-    "capacity_rebalance",
-    local.workers_group_defaults["capacity_rebalance"]
-  )
-
-  dynamic "mixed_instances_policy" {
-    iterator = item
-    for_each = (lookup(var.worker_groups_launch_template[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [var.worker_groups_launch_template[count.index]] : []
-
-    content {
-      instances_distribution {
-        on_demand_allocation_strategy = lookup(
-          item.value,
-          "on_demand_allocation_strategy",
-          "prioritized",
-        )
-        on_demand_base_capacity = lookup(
-          item.value,
-          "on_demand_base_capacity",
-          local.workers_group_defaults["on_demand_base_capacity"],
-        )
-        on_demand_percentage_above_base_capacity = lookup(
-          item.value,
-          "on_demand_percentage_above_base_capacity",
-          local.workers_group_defaults["on_demand_percentage_above_base_capacity"],
-        )
-        spot_allocation_strategy = lookup(
-          item.value,
-          "spot_allocation_strategy",
-          local.workers_group_defaults["spot_allocation_strategy"],
-        )
-        spot_instance_pools = lookup(
-          item.value,
-          "spot_instance_pools",
-          local.workers_group_defaults["spot_instance_pools"],
-        )
-        spot_max_price = lookup(
-          item.value,
-          "spot_max_price",
-          local.workers_group_defaults["spot_max_price"],
-        )
-      }
-
-      launch_template {
-        launch_template_specification {
-          launch_template_id = aws_launch_template.workers_launch_template.*.id[count.index]
-          version = lookup(
-            var.worker_groups_launch_template[count.index],
-            "launch_template_version",
-            lookup(
-              var.worker_groups_launch_template[count.index],
-              "launch_template_version",
-              local.workers_group_defaults["launch_template_version"]
-            ) == "$Latest"
-            ? aws_launch_template.workers_launch_template.*.latest_version[count.index]
-            : aws_launch_template.workers_launch_template.*.default_version[count.index]
-          )
-        }
-
-        dynamic "override" {
-          for_each = lookup(
-            var.worker_groups_launch_template[count.index],
-            "override_instance_types",
-            local.workers_group_defaults["override_instance_types"]
-          )
-
-          content {
-            instance_type = override.value
-          }
-        }
-      }
-    }
-  }
-
-  dynamic "launch_template" {
-    iterator = item
-    for_each = (lookup(var.worker_groups_launch_template[count.index], "override_instance_types", null) != null) || (lookup(var.worker_groups_launch_template[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"]) != null) ? [] : [var.worker_groups_launch_template[count.index]]
-
-    content {
-      id = aws_launch_template.workers_launch_template.*.id[count.index]
-      version = lookup(
-        var.worker_groups_launch_template[count.index],
-        "launch_template_version",
-        lookup(
-          var.worker_groups_launch_template[count.index],
-          "launch_template_version",
-          local.workers_group_defaults["launch_template_version"]
-        ) == "$Latest"
-        ? aws_launch_template.workers_launch_template.*.latest_version[count.index]
-        : aws_launch_template.workers_launch_template.*.default_version[count.index]
-      )
-    }
-  }
-
-  dynamic "initial_lifecycle_hook" {
-    for_each = var.worker_create_initial_lifecycle_hooks ? lookup(var.worker_groups_launch_template[count.index], "asg_initial_lifecycle_hooks", local.workers_group_defaults["asg_initial_lifecycle_hooks"]) : []
-    content {
-      name                    = initial_lifecycle_hook.value["name"]
-      lifecycle_transition    = initial_lifecycle_hook.value["lifecycle_transition"]
-      notification_metadata   = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
-      heartbeat_timeout       = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
-      notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
-      role_arn                = lookup(initial_lifecycle_hook.value, "role_arn", null)
-      default_result          = lookup(initial_lifecycle_hook.value, "default_result", null)
-    }
-  }
-
-  dynamic "warm_pool" {
-    for_each = lookup(var.worker_groups_launch_template[count.index], "warm_pool", null) != null ? [lookup(var.worker_groups_launch_template[count.index], "warm_pool")] : []
-
-    content {
-      pool_state                  = lookup(warm_pool.value, "pool_state", null)
-      min_size                    = lookup(warm_pool.value, "min_size", null)
-      max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
-    }
-  }
-
-  dynamic "tag" {
-    for_each = concat(
-      [
-        {
-          "key" = "Name"
-          "value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
-            var.worker_groups_launch_template[count.index],
-            "name",
-            count.index,
-          )}-eks_asg"
-          "propagate_at_launch" = true
-        },
-        {
-          "key"                 = "kubernetes.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}"
-          "value"               = "owned"
-          "propagate_at_launch" = true
-        },
-      ],
-      [
-        for tag_key, tag_value in var.tags :
-        tomap({
-          key                 = tag_key
-          value               = tag_value
-          propagate_at_launch = "true"
-        })
-        if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
-      ],
-      lookup(
-        var.worker_groups_launch_template[count.index],
-        "tags",
-        local.workers_group_defaults["tags"]
-      )
-    )
-    content {
-      key                 = tag.value.key
-      value               = tag.value.value
-      propagate_at_launch = tag.value.propagate_at_launch
-    }
-  }
-
-  # logic duplicated in workers.tf
-  dynamic "instance_refresh" {
-    for_each = lookup(var.worker_groups_launch_template[count.index],
-      "instance_refresh_enabled",
-    local.workers_group_defaults["instance_refresh_enabled"]) ? [1] : []
-    content {
-      strategy = lookup(
-        var.worker_groups_launch_template[count.index], "instance_refresh_strategy",
-        local.workers_group_defaults["instance_refresh_strategy"]
-      )
-      preferences {
-        instance_warmup = lookup(
-          var.worker_groups_launch_template[count.index], "instance_refresh_instance_warmup",
-          local.workers_group_defaults["instance_refresh_instance_warmup"]
-        )
-        min_healthy_percentage = lookup(
-          var.worker_groups_launch_template[count.index], "instance_refresh_min_healthy_percentage",
-          local.workers_group_defaults["instance_refresh_min_healthy_percentage"]
-        )
-      }
-      triggers = lookup(
-        var.worker_groups_launch_template[count.index], "instance_refresh_triggers",
-        local.workers_group_defaults["instance_refresh_triggers"]
-      )
-    }
-  }
-
-  lifecycle {
-    create_before_destroy = true
-    ignore_changes        = [desired_capacity]
-  }
-}
-
-resource "aws_launch_template" "workers_launch_template" {
-  count = var.create_eks ? (local.worker_group_launch_template_count) : 0
-  name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
-    var.worker_groups_launch_template[count.index],
-    "name",
-    count.index,
-  )}"
-
-  update_default_version = lookup(
-    var.worker_groups_launch_template[count.index],
-    "update_default_version",
-    local.workers_group_defaults["update_default_version"],
-  )
-
-  network_interfaces {
-    associate_public_ip_address = lookup(
-      var.worker_groups_launch_template[count.index],
-      "public_ip",
-      local.workers_group_defaults["public_ip"],
-    )
-    delete_on_termination = lookup(
-      var.worker_groups_launch_template[count.index],
-      "eni_delete",
-      local.workers_group_defaults["eni_delete"],
-    )
-    security_groups = flatten([
-      local.worker_security_group_id,
-      var.worker_additional_security_group_ids,
-      lookup(
-        var.worker_groups_launch_template[count.index],
-        "additional_security_group_ids",
-        local.workers_group_defaults["additional_security_group_ids"],
-      ),
-    ])
-  }
-
-  iam_instance_profile {
-    name = coalescelist(
-      aws_iam_instance_profile.workers_launch_template.*.name,
-      data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.name,
-    )[count.index]
-  }
-
-  enclave_options {
-    enabled = lookup(
-      var.worker_groups_launch_template[count.index],
-      "enclave_support",
-      local.workers_group_defaults["enclave_support"],
-    )
-  }
-
-  image_id = lookup(
-    var.worker_groups_launch_template[count.index],
-    "ami_id",
-    lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.default_ami_id_windows : local.default_ami_id_linux,
-  )
-  instance_type = lookup(
-    var.worker_groups_launch_template[count.index],
-    "instance_type",
-    local.workers_group_defaults["instance_type"],
-  )
-
-  dynamic "elastic_inference_accelerator" {
-    for_each = lookup(
-      var.worker_groups_launch_template[count.index],
-      "elastic_inference_accelerator",
-      local.workers_group_defaults["elastic_inference_accelerator"]
-    ) != null ? [lookup(var.worker_groups_launch_template[count.index], "elastic_inference_accelerator", local.workers_group_defaults["elastic_inference_accelerator"])] : []
-    content {
-      type = elastic_inference_accelerator.value
-    }
-  }
-
-  key_name = lookup(
-    var.worker_groups_launch_template[count.index],
-    "key_name",
-    local.workers_group_defaults["key_name"],
-  )
-  user_data = base64encode(
-    local.launch_template_userdata_rendered[count.index],
-  )
-
-  ebs_optimized = lookup(
-    var.worker_groups_launch_template[count.index],
-    "ebs_optimized",
-    !contains(
-      local.ebs_optimized_not_supported,
-      lookup(
-        var.worker_groups_launch_template[count.index],
-        "instance_type",
-        local.workers_group_defaults["instance_type"],
-      )
-    )
-  )
-
-  metadata_options {
-    http_endpoint = lookup(
-      var.worker_groups_launch_template[count.index],
-      "metadata_http_endpoint",
-      local.workers_group_defaults["metadata_http_endpoint"],
-    )
-    http_tokens = lookup(
-      var.worker_groups_launch_template[count.index],
-      "metadata_http_tokens",
-      local.workers_group_defaults["metadata_http_tokens"],
-    )
-    http_put_response_hop_limit = lookup(
-      var.worker_groups_launch_template[count.index],
-      "metadata_http_put_response_hop_limit",
-      local.workers_group_defaults["metadata_http_put_response_hop_limit"],
-    )
-  }
-
-  dynamic "credit_specification" {
-    for_each = lookup(
-      var.worker_groups_launch_template[count.index],
-      "cpu_credits",
-      local.workers_group_defaults["cpu_credits"]
-    ) != null ? [lookup(var.worker_groups_launch_template[count.index], "cpu_credits", local.workers_group_defaults["cpu_credits"])] : []
-    content {
-      cpu_credits = credit_specification.value
-    }
-  }
-
-  monitoring {
-    enabled = lookup(
-      var.worker_groups_launch_template[count.index],
-      "enable_monitoring",
-      local.workers_group_defaults["enable_monitoring"],
-    )
-  }
-
-  dynamic "placement" {
-    for_each = lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"]) != null ? [lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"])] : []
-
-    content {
-      tenancy = lookup(
-        var.worker_groups_launch_template[count.index],
-        "launch_template_placement_tenancy",
-        local.workers_group_defaults["launch_template_placement_tenancy"],
-      )
-      group_name = placement.value
-    }
-  }
-
-  dynamic "instance_market_options" {
-    for_each = lookup(var.worker_groups_launch_template[count.index], "market_type", null) == null ? [] : tolist([lookup(var.worker_groups_launch_template[count.index], "market_type", null)])
-    content {
-      market_type = instance_market_options.value
-    }
-  }
-
-  block_device_mappings {
-    device_name = lookup(
-      var.worker_groups_launch_template[count.index],
-      "root_block_device_name",
-      lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows" ? local.workers_group_defaults["root_block_device_name_windows"] : local.workers_group_defaults["root_block_device_name"],
-    )
-
-    ebs {
-      volume_size = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_volume_size",
-        local.workers_group_defaults["root_volume_size"],
-      )
-      volume_type = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_volume_type",
-        local.workers_group_defaults["root_volume_type"],
-      )
-      iops = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_iops",
-        local.workers_group_defaults["root_iops"],
-      )
-      throughput = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_volume_throughput",
-        local.workers_group_defaults["root_volume_throughput"],
-      )
-      encrypted = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_encrypted",
-        local.workers_group_defaults["root_encrypted"],
-      )
-      kms_key_id = lookup(
-        var.worker_groups_launch_template[count.index],
-        "root_kms_key_id",
-        local.workers_group_defaults["root_kms_key_id"],
-      )
-      delete_on_termination = true
-    }
-  }
-
-  dynamic "block_device_mappings" {
-    for_each = lookup(var.worker_groups_launch_template[count.index], "additional_ebs_volumes", local.workers_group_defaults["additional_ebs_volumes"])
-    content {
-      device_name = block_device_mappings.value.block_device_name
-
-      ebs {
-        volume_size = lookup(
-          block_device_mappings.value,
-          "volume_size",
-          local.workers_group_defaults["root_volume_size"],
-        )
-        volume_type = lookup(
-          block_device_mappings.value,
-          "volume_type",
-          local.workers_group_defaults["root_volume_type"],
-        )
-        iops = lookup(
-          block_device_mappings.value,
-          "iops",
-          local.workers_group_defaults["root_iops"],
-        )
-        throughput = lookup(
-          block_device_mappings.value,
-          "throughput",
-          local.workers_group_defaults["root_volume_throughput"],
-        )
-        encrypted = lookup(
-          block_device_mappings.value,
-          "encrypted",
-          local.workers_group_defaults["root_encrypted"],
-        )
-        kms_key_id = lookup(
-          block_device_mappings.value,
-          "kms_key_id",
-          local.workers_group_defaults["root_kms_key_id"],
-        )
-        delete_on_termination = lookup(block_device_mappings.value, "delete_on_termination", true)
-      }
-    }
-
-  }
-
-  dynamic "block_device_mappings" {
-    for_each = lookup(var.worker_groups_launch_template[count.index], "additional_instance_store_volumes", local.workers_group_defaults["additional_instance_store_volumes"])
-    content {
-      device_name = block_device_mappings.value.block_device_name
-      virtual_name = lookup(
-        block_device_mappings.value,
-        "virtual_name",
-        local.workers_group_defaults["instance_store_virtual_name"],
-      )
-    }
-  }
-
-  tag_specifications {
-    resource_type = "volume"
-
-    tags = merge(
-      {
-        "Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
-          var.worker_groups_launch_template[count.index],
-          "name",
-          count.index,
-        )}-eks_asg"
-      },
-      var.tags,
-    )
-  }
-
-  tag_specifications {
-    resource_type = "instance"
-
-    tags = merge(
-      {
-        "Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(
-          var.worker_groups_launch_template[count.index],
-          "name",
-          count.index,
-        )}-eks_asg"
-      },
-      { for tag_key, tag_value in var.tags :
-        tag_key => tag_value
-        if tag_key != "Name" && !contains([for tag in lookup(var.worker_groups_launch_template[count.index], "tags", local.workers_group_defaults["tags"]) : tag["key"]], tag_key)
-      }
-    )
-  }
-
-  tags = var.tags
-
-  lifecycle {
-    create_before_destroy = true
-  }
-
-  # Prevent premature access of security group roles and policies by pods that
-  # require permissions on create/destroy that depend on workers.
-  depends_on = [
-    aws_security_group_rule.workers_egress_internet,
-    aws_security_group_rule.workers_ingress_self,
-    aws_security_group_rule.workers_ingress_cluster,
-    aws_security_group_rule.workers_ingress_cluster_kubelet,
-    aws_security_group_rule.workers_ingress_cluster_https,
-    aws_security_group_rule.workers_ingress_cluster_primary,
-    aws_security_group_rule.cluster_primary_ingress_workers,
-    aws_iam_role_policy_attachment.workers_AmazonEKSWorkerNodePolicy,
-    aws_iam_role_policy_attachment.workers_AmazonEKS_CNI_Policy,
-    aws_iam_role_policy_attachment.workers_AmazonEC2ContainerRegistryReadOnly,
-    aws_iam_role_policy_attachment.workers_additional_policies
-  ]
-}
-
-resource "aws_iam_instance_profile" "workers_launch_template" {
-  count       = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_count : 0
-  name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
-  role = lookup(
-    var.worker_groups_launch_template[count.index],
-    "iam_role_id",
-    local.default_iam_role_id,
-  )
-  path = var.iam_path
-  tags = var.tags
-
-  lifecycle {
-    create_before_destroy = true
-  }
-}
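
Similarly, a hedged reconstruction of a var.worker_groups_launch_template entry consumed by the deleted launch-template path, again limited to keys visible in the lookup() calls above (values are placeholders):

    worker_groups_launch_template = [
      {
        name                     = "spot-workers" # placeholder
        instance_type            = "m5.large"     # placeholder
        override_instance_types  = ["m5.large", "m5a.large"]
        spot_allocation_strategy = "lowest-price" # placeholder
        spot_instance_pools      = 2
        spot_max_price           = "0.10"         # placeholder
        on_demand_base_capacity  = 0
        asg_min_size             = 1
        asg_desired_capacity     = 2
        asg_max_size             = 5
        root_volume_size         = 50
        subnets                  = ["subnet-0123456789abcdef0"] # placeholder
      }
    ]
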