receive_message_wait_time: 0
register: management_failure_queue
-- debug:
- var: management_failure_queue
+# as of Ansible 2.2.1.0 sqs_queue does not seem to be returning queue_arn
- name: Management queue.
sqs_queue:
purge_subscriptions: False
register: management_notice_topic
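+# Possible workaround (untested sketch): until sqs_queue returns queue_arn,
+# compose the ARN by hand from region, account id, and queue name. Assumes
+# ACCT_ID is the AWS account id; management_queue_name is a hypothetical
+# variable holding the queue's name.
+# - name: derive management queue arn
+#   set_fact:
+#     management_queue_arn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ management_queue_name }}"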
-# - name: Ensure management backup bucket exists.
-# s3_bucket:
-# when: MANAGEMENT_BACKUP_S3_BUCKET is defined
-# state: present
-# name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
+- name: Management backup bucket
+ when: MANAGEMENT_BACKUP_S3_BUCKET is defined
+ s3_bucket:
+ state: present
+ name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
- name: sg ssh
ec2_group:
group_name: management-elb
register: sg_management
-- debug:
- var: sg_management
-
- name: elb management-int-elb
ec2_elb_lb:
region: "{{ vpc_region }}"
iam_type: role
state: present
+# the iam_role module requires Ansible 2.3+
+# - name: management role policies
+# iam_role:
+# name: management
+# state: present
+# managed_policy:
+# - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
+# - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy
+
+# will need to rev name-version when changing AMI
- name: management lc
ec2_lc:
region: "{{ vpc_region }}"
- "{{ sg_ssh.group_id }}"
instance_type: m4.large
volumes:
- - device_name: /dev/sda1
- volume_size: 8
- volume_type: gp2
- delete_on_termination: true
+# setting the root volume seems to prevent instances from launching
+# - device_name: /dev/sda1
+# volume_size: 8
+# volume_type: gp2
+# delete_on_termination: true
- device_name: /dev/sdb
ephemeral: ephemeral0
- device_name: /dev/sdc
ephemeral: ephemeral3
register: mgmt_lc
-- debug:
- var: mgmt_lc
-
- name: management asg
ec2_asg:
region: "{{ vpc_region }}"
min_size: 1
max_size: 1
desired_capacity: 1
+ default_cooldown: 10
vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
launch_config_name: "{{ mgmt_lc.name }}"
notification_topic: "{{ management_topic.sns_arn }}"
- management-int-elb
tags:
- module: management
- propogate_at_launch: yes
+ propagate_at_launch: yes
+- name: not implemented yet
+ debug:
+ msg: |
+ attach policies to iam role
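+# Possible interim approach (untested sketch): shell out to the AWS CLI until
+# the iam_role module is available. Assumes the aws CLI is installed and
+# credentialed on the control host; attach-role-policy is safe to re-run.
+# - name: attach managed policies to management role
+#   command: >
+#     aws iam attach-role-policy
+#     --role-name management
+#     --policy-arn {{ item }}
+#   with_items:
+#     - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
+#     - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy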
reuse_existing_ip_allowed: yes
register: access_eip
+# As of Ansible 2.2.1.0, it cannot set anything on the main route table
+# due to limitations of the underlying boto library; a CLI-based sketch
+# follows the commented tasks below.
+#
+# - name: route table facts
+# ec2_vpc_route_table_facts:
+# region: "{{ vpc_region }}"
+# filters:
+# vpc-id: "{{ vpc.vpc.id }}"
+# register: vpc_default_route
+#
+# - debug:
+# var: vpc_default_route
+#
+# - name: Main route table
+# ec2_vpc_route_table:
+# state: present
+# vpc_id: "{{ vpc.vpc.id }}"
+# region: "{{ vpc_region }}"
+# lookup: id
+# route_table_id: "{{ vpc_default_route.route_tables[0].id }}"
+# routes:
+# - gateway_id: igw
+# register: main_route
+#
+# - debug:
+# var: main_route
+
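+# Possible workaround (untested sketch): drive the main route table through
+# the AWS CLI instead of boto. Assumes the aws CLI is available and that the
+# route table facts task above has registered vpc_default_route; create-route
+# errors if the route already exists, hence the failed_when guard.
+# - name: add igw route to main route table
+#   command: >
+#     aws ec2 create-route
+#     --region {{ vpc_region }}
+#     --route-table-id {{ vpc_default_route.route_tables[0].id }}
+#     --destination-cidr-block 0.0.0.0/0
+#     --gateway-id {{ igw.gateway_id }}
+#   register: main_route_cli
+#   failed_when: main_route_cli.rc != 0 and 'RouteAlreadyExists' not in main_route_cli.stderr
+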
- name: Private route table
ec2_vpc_route_table:
state: present
subnets: "{{ vpc_subnets_priv|map(attribute='cidr')|list }}"
register: private_rt
-- name: Public route table
- ec2_vpc_route_table:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ vpc_region }}"
- tags:
- Name: "Public-Routes-vpc-{{ vpc_region }}"
- zone: pub
- managed: 'no'
- subnets: "{{ vpc_subnets_pub|map(attribute='cidr')|list }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- register: public_rt
+# Using Main route table for public subnets, for now.
+#
+# - name: Public route table
+# ec2_vpc_route_table:
+# state: present
+# vpc_id: "{{ vpc.vpc.id }}"
+# region: "{{ vpc_region }}"
+# tags:
+# Name: "Public-Routes-vpc-{{ vpc_region }}"
+# zone: pub
+# managed: 'no'
+# subnets: "{{ vpc_subnets_pub|map(attribute='cidr')|list }}"
+# routes:
+# - dest: 0.0.0.0/0
+# gateway_id: "{{ igw.gateway_id }}"
+# register: public_rt
- name: not implemented yet
debug:
msg: |
+ Add IGW to VPC Main route table
Change pub-subnets to auto-assign external IPs
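+# Possible approach for the second item (untested sketch): flip
+# map-public-ip-on-launch via the AWS CLI. pub_subnet_ids is a hypothetical
+# variable holding the ids of the public subnets.
+# - name: auto-assign public ips on pub subnets
+#   command: >
+#     aws ec2 modify-subnet-attribute
+#     --region {{ vpc_region }}
+#     --subnet-id {{ item }}
+#     --map-public-ip-on-launch
+#   with_items: "{{ pub_subnet_ids }}"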