+++ /dev/null
----
-- assert:
- that:
- - MANAGEMENT_EVENT_FAILURE_QUEUE != ''
- - MANAGEMENT_EVENT_QUEUE != ''
- - MANAGEMENT_SUBNET != ''
- - DEFAULT_AMI != ''
- tags: ['check_vars']
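-# fails fast if any required variable is unset; run only these checks
-# with: ansible-playbook ... --tags check_vars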
-
-- name: Management failure queue.
- sqs_queue:
- state: present
- region: "{{ vpc_region }}"
- name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
- default_visibility_timeout: 30
- message_retention_period: 1209600
- maximum_message_size: 262144
- delivery_delay: 0
- receive_message_wait_time: 0
- register: management_failure_queue
-
-# as of Ansible 2.2.1.0, sqs_queue does not seem to return queue_arn, so the
-# dead-letter and subscription ARNs below are built by hand instead
-
-- name: Management queue.
- sqs_queue:
- state: present
- region: "{{ vpc_region }}"
- name: "{{ MANAGEMENT_EVENT_QUEUE }}"
- default_visibility_timeout: 30
- message_retention_period: 345600
- maximum_message_size: 262144
- delivery_delay: 0
- receive_message_wait_time: 20
- redrive_policy:
- maxReceiveCount: 5
- deadLetterTargetArn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
-# deadLetterTargetArn: "{{ management_failure_queue.queue_arn }}"
- register: management_queue
-
-- name: Management topic and subscription.
- sns_topic:
- state: present
- region: "{{ vpc_region }}"
- name: "{{ MANAGEMENT_EVENT_QUEUE }}"
- display_name: "{{ MANAGEMENT_EVENT_QUEUE_SHORT }}"
- purge_subscriptions: False
- subscriptions:
-# - endpoint: "{{ management_queue.queue_arn }}"
- - endpoint: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
- protocol: "sqs"
- register: management_topic
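-# note: SNS can only deliver to the queue if the queue's access policy allows
-# the topic to send; roughly (sketch only, not applied here):
-#   Effect: Allow
-#   Principal: {"Service": "sns.amazonaws.com"}
-#   Action: "sqs:SendMessage"
-#   Condition: {"ArnEquals": {"aws:SourceArn": "<topic arn>"}}
-# the sqs_queue module's policy option is one place to attach such a statement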
-
-- name: Management notice topic
- sns_topic:
- state: present
- region: "{{ vpc_region }}"
- name: "{{ MANAGEMENT_NOTICE_TOPIC }}"
- display_name: "{{ MANAGEMENT_NOTICE_TOPIC_SHORT }}"
- purge_subscriptions: False
- register: management_notice_topic
-
-- name: Management backup bucket
- when: MANAGEMENT_BACKUP_S3_BUCKET is defined
- s3_bucket:
- state: present
- name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
-
-- name: sg ssh
- ec2_group:
- vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ vpc_region }}"
- state: present
- name: ssh
- description: "allow ssh from anywhere"
- purge_rules: false
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
- register: sg_ssh
-
-- name: sg management-elb
- ec2_group:
- vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ vpc_region }}"
- state: present
- name: management-elb
- description: "sg for internal elb for monitoring management"
- purge_rules: false
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
-
-- name: sg management
- ec2_group:
- vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ vpc_region }}"
- state: present
- name: management
- description: "sg for management"
- purge_rules: false
- rules:
- - proto: all
- group_name: management
- - proto: all
- group_name: management-elb
- register: sg_management
-
-- name: elb management-int-elb
- ec2_elb_lb:
- region: "{{ vpc_region }}"
- state: present
- name: management-int-elb
- cross_az_load_balancing: yes
- scheme: internal
- subnets: "{{ MANAGEMENT_SUBNET }}"
- security_group_names:
- - management-elb
- listeners:
- - protocol: tcp
- load_balancer_port: 22
- instance_port: 22
- health_check:
- ping_protocol: tcp
- ping_port: 22
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
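-# a TCP check on 22 marks an instance healthy as soon as sshd accepts
-# connections; no SSH handshake is performed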
-
-- name: management key
- ec2_key:
- region: "{{ vpc_region }}"
- name: "{{ MANAGEMENT_KEY_NAME }}"
- key_material: "{{ item }}"
- with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub
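-# with_file reads keys/<MANAGEMENT_KEY_NAME>.pub and supplies its contents as
-# {{ item }}, so key_material receives the public key text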
-
-- name: management iam
- iam:
- name: management
- iam_type: role
- state: present
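-# the iam module also creates an instance profile with the same name as the
-# role; the launch configuration below references it via instance_profile_name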
-
-# this is only ansible 2.3+
-# - name: management role policies
-# iam_role:
-# name: management
-# state: present
-# managed_policy:
-# - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
-# - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy
-
-# launch configurations are immutable: rev the name suffix
-# (management-0000 -> management-0001) when changing the AMI
-- name: management lc
- ec2_lc:
- region: "{{ vpc_region }}"
- name: management-0000
- image_id: "{{ DEFAULT_AMI }}"
- key_name: "{{ MANAGEMENT_KEY_NAME }}"
- instance_profile_name: management
- security_groups:
- - "{{ sg_management.group_id }}"
- - "{{ sg_ssh.group_id }}"
- instance_type: m4.large
- volumes:
-# setting the root volume seems to prevent instances from launching
-# - device_name: /dev/sda1
-# volume_size: 8
-# volume_type: gp2
-# delete_on_termination: true
- - device_name: /dev/sdb
- ephemeral: ephemeral0
- - device_name: /dev/sdc
- ephemeral: ephemeral1
- - device_name: /dev/sdd
- ephemeral: ephemeral2
- - device_name: /dev/sde
- ephemeral: ephemeral3
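-# m4 instances are EBS-only, so these ephemeral mappings are ignored at
-# launch; they only take effect on instance types with instance-store volumes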
- register: mgmt_lc
-
-- name: management asg
- ec2_asg:
- region: "{{ vpc_region }}"
- name: management
- min_size: 1
- max_size: 1
- desired_capacity: 1
- default_cooldown: 10
- vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
- launch_config_name: "{{ mgmt_lc.name }}"
- notification_topic: "{{ management_topic.sns_arn }}"
- notification_types:
- - autoscaling:EC2_INSTANCE_LAUNCH
- load_balancers:
- - management-int-elb
- tags:
- - module: management
- propagate_at_launch: yes
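-# only launch events are published; add e.g. autoscaling:EC2_INSTANCE_TERMINATE
-# to notification_types to be notified on scale-in as well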
-
-- name: not implemented yet
- debug:
- msg: |
- attach policies to iam role
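-# until the commented Ansible 2.3+ iam_role task above can be used, base-policy
-# and management-policy must be attached to the management role out of band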