---
# Fail fast if required configuration is missing before touching AWS.
# The queue-related asserts and the inline SQS queue / SNS topic /
# S3 bucket / sg-ssh tasks that used to follow were removed by the
# change set recorded in this file; that provisioning now lives in
# shared roles (see the include_role tasks later in this file).
# NOTE(review): later tasks still reference sg_ssh / sg_icmp /
# sg_management registered facts -- confirm those security groups are
# created by an earlier task or role before relying on this cleanup.
- assert:
    that:
      - MANAGEMENT_SUBNET != ''
      - DEFAULT_AMI != ''
      - version != ''
      - phase != ''
  tags: ['check_vars']
-
- name: sg management-elb
ec2_group:
vpc_id: "{{ vpc.vpc.id }}"
interval: 30
unhealthy_threshold: 2
healthy_threshold: 2
+ register: elb_management
- name: management key
ec2_key:
# - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
# - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy
# Build the management launch configuration via the shared launchconfig
# role instead of an inline ec2_lc task.  The old inline task
# (management-0000, DEFAULT_AMI, m4.large, instance_profile management,
# four ephemeral volumes sdb-sde) is preserved in VCS history; the
# name-version had to be revved whenever the AMI changed.
# NOTE(review): assumes sg_ssh, sg_icmp and sg_management were
# registered by earlier tasks/roles -- confirm, since the inline sg
# tasks were removed from this file.
- include_role:
    name: launchconfig
  vars:
    security_group_ids:
      - "{{ sg_ssh.group_id }}"
      - "{{ sg_icmp.group_id }}"
      - "{{ sg_management.group_id }}"
# Build the management auto-scaling group via the shared
# autoscalinggroup role instead of an inline ec2_asg task.  The old
# inline task (desired_capacity 1, default_cooldown 10, launch config
# from mgmt_lc, EC2_INSTANCE_LAUNCH notifications to the management SNS
# topic, propagated account/module/stack/country/phase tags) is
# preserved in VCS history if any of that needs to come back.
# NOTE(review): elb_management must be registered by an earlier task --
# confirm the role accepts a scalar ELB name for load_balancers.
- include_role:
    name: autoscalinggroup
  vars:
    load_balancers: "{{ elb_management.elb.name }}"
    min_size: 1
    max_size: 1
    subnet_ids: "{{ MANAGEMENT_SUBNET }}"

- name: not implemented yet
  debug: