further progress on infrastructure buildout
[awsible] roles/aws-infrastructure/tasks/main.yml
index 0f1a39073aa713ed1db8ee5a59f8d79be2bd2416..b4663f5ca1fd4fe256d862494107431bd94faa66 100644
-- action: ec2_facts
+---
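+# Fail fast when required vars are missing; this check can be run on its own
+# with --tags check_vars.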
+- assert:
+    that:
+    - MANAGEMENT_EVENT_FAILURE_QUEUE != ''
+    - MANAGEMENT_EVENT_QUEUE != ''
+    - MANAGEMENT_SUBNET != ''
+    - DEFAULT_AMI != ''
+  tags: ['check_vars']
+
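+# The dead-letter queue is created first so the main queue's redrive policy
+# below can reference it; the 1209600 s retention is the 14-day SQS maximum.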
+- name: Management failure queue.
+  sqs_queue:
+    state: present
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
+    default_visibility_timeout: 30
+    message_retention_period: 1209600
+    maximum_message_size: 262144
+    delivery_delay: 0
+    receive_message_wait_time: 0
+  register: management_failure_queue
+
+- debug:
+    var: management_failure_queue
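+# The main event queue below keeps messages for 4 days (345600 s) and uses a
+# 20 s receive wait, i.e. long polling.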
 
 - name: Management queue.
   sqs_queue:
     state: present
+    region: "{{ vpc_region }}"
     name: "{{ MANAGEMENT_EVENT_QUEUE }}"
     default_visibility_timeout: 30
     message_retention_period: 345600
     maximum_message_size: 262144
     delivery_delay: 0
     receive_message_wait_time: 20
-
-- name: Management failure queue.
-    sqs_queue:
-      state: present
-      name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
-      default_visibility_timeout: 30
-      message_retention_period: 1209600
-      maximum_message_size: 262144
-      delivery_delay: 0
-      receive_message_wait_time: 0
+    redrive_policy:
+      maxReceiveCount: 5
+      deadLetterTargetArn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
+#      deadLetterTargetArn: "{{ management_failure_queue.queue_arn }}"
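+#      (after maxReceiveCount unsuccessful receives, messages are moved to the failure queue above)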
+  register: management_queue
 
 - name: Management topic and subscription.
-    sns_topic:
-      state: present
-      name: "{{ MANAGEMENT_EVENT_QUEUE }}"
-      display_name: "management"
-      purge_subscriptions: False
-      subscriptions:
-      - endpoint: "arn:aws:sqs:{{ ansible_ec2_placement_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
-        protocol: "sqs"
-
-- name: Ensure management backup bucket exists.
-  s3_bucket:
+  sns_topic:
     state: present
-    when: MANAGEMENT_BACKUP_S3_BUCKET|defined
-    name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
+    display_name: "{{ MANAGEMENT_EVENT_QUEUE_SHORT }}"
+    purge_subscriptions: False
+    subscriptions:
+#   - endpoint: "{{ management_queue.queue_arn }}"
+    - endpoint: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
+      protocol: "sqs"
+  register: management_topic
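+# The ASG at the end of this file publishes launch notifications to this topic,
+# which fan out to the management SQS queue via the subscription above.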
+
+- name: Management notice topic
+  sns_topic:
+    state: present
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_NOTICE_TOPIC }}"
+    display_name: "{{ MANAGEMENT_NOTICE_TOPIC_SHORT }}"
+    purge_subscriptions: False
+  register: management_notice_topic
+
+# - name: Ensure management backup bucket exists.
+#   s3_bucket:
+#     when: MANAGEMENT_BACKUP_S3_BUCKET is defined
+#     state: present
+#     name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
 
 - name: sg ssh
-    ec2_group:
-      state: present
-      name: sshOnly
-      description: "allow ssh from anywhere"
-      purge_rules: false
-      rules:
-      - proto: tcp
-        from_port: 22
-        to_port: 22
-        cidr_ip: 0.0.0.0/0
-      rules_egress:
-      - proto: all
-        cidr_ip: 0.0.0.0/0
-
-  - name: sg management-elb
-    ec2_group:
-      state: present
-      name: management-elb
-      description: "sg for internal elb for monitoring management"
-      purge_rules: false
-      rules:
-      - proto: tcp
-        from_port: 22
-        to_port: 22
-        cidr_ip: 0.0.0.0/0
-      rules_egress:
-      - proto: all
-        cidr_ip: 0.0.0.0/0
-
-  - name: sg management
-    ec2_group:
-      state: present
-      name: management
-      description: "sg for management"
-      purge_rules: false
-      rules:
-      - proto: all
-        group_name: management
-      - proto: all
-        group_name: management-elb
-
-  - name: elb management-int-elb
-    ec2_elb_lb:
-      state: present
-      name: management-int-elb
-      cross_az_load_balancing: yes
-      scheme: internal
-      subnets: "{{ MANAGEMENT_SUBNET }}"
-      security_group_names:
-      - management-elb
-      listeners:
-      - protocol: tcp
-        load_balancer_port: 22
-        instance_port: 22
-      health_check:
-        ping_protocol: tcp
-        ping_port: 22
-        response_timeout: 5
-        interval: 30
-        unhealthy_threshold: 2
-        healthy_threshold: 2
-
-  - name: management iam
-    iam:
-      name: management
-      iam_type: role
-      state: present
-
-  - name: management lc
-    ec2_lc:
-      name: management-0000
-      image_id: "{{ DEFAULT_AMI }}"
-      key_name: management-key
-      security_groups:
-      - management
-      - sshOnly
-      instance_type: m4.large
-      volumes:
-      - device_name: /dev/sda1
-        volume_size: 8
-        volume_type: gp2
-        delete_on_termination: true
-      - device_name: /dev/sdb
-        ephemeral: ephemeral0
-      - device_name: /dev/sdc
-        ephemeral: ephemeral1
-      - device_name: /dev/sdd
-        ephemeral: ephemeral2
-      - device_name: /dev/sde
-        ephemeral: ephemeral3
-    register: mgmt_lc
-
-  - name: management asg
-    ec2_asg:
-      name: management
-      min_size: 1
-      max_size: 1
-      desired_capacity: 1
-      vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
-      launch_config_name: "{{ mgmt_lc.something.name }}"
-      tags:
-      - module: management
-        propogate_at_launch: yes
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: ssh
+    description: "allow ssh from anywhere"
+    purge_rules: false
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+  register: sg_ssh
+
+- name: sg management-elb
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: management-elb
+    description: "sg for internal elb for monitoring management"
+    purge_rules: false
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+
+- name: sg management
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: management
+    description: "sg for management"
+    purge_rules: false
+    rules:
+    - proto: all
+      group_name: management
+    - proto: all
+      group_name: management-elb
+  register: sg_management
+
+- debug:
+    var: sg_management
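+# The registered group ids are consumed by the launch configuration's
+# security_groups list further down.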
+
+- name: elb management-int-elb
+  ec2_elb_lb:
+    region: "{{ vpc_region }}"
+    state: present
+    name: management-int-elb
+    cross_az_load_balancing: yes
+    scheme: internal
+    subnets: "{{ MANAGEMENT_SUBNET }}"
+    security_group_names:
+    - management-elb
+    listeners:
+    - protocol: tcp
+      load_balancer_port: 22
+      instance_port: 22
+    health_check:
+      ping_protocol: tcp
+      ping_port: 22
+      response_timeout: 5
+      interval: 30
+      unhealthy_threshold: 2
+      healthy_threshold: 2
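+# TCP health check on port 22: an instance counts as healthy once SSH answers.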
+
+- name: management key
+  ec2_key:
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_KEY_NAME }}"
+    key_material: "{{ item }}"
+  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub
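+# with_file resolves the public key from this role's files/ tree, i.e.
+# files/keys/<MANAGEMENT_KEY_NAME>.pub.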
+
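+# IAM role for the management nodes; the launch configuration below attaches
+# it via instance_profile_name.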
+- name: management iam
+  iam:
+    name: management
+    iam_type: role
+    state: present
+
+- name: management lc
+  ec2_lc:
+    region: "{{ vpc_region }}"
+    name: management-0000
+    image_id: "{{ DEFAULT_AMI }}"
+    key_name: "{{ MANAGEMENT_KEY_NAME }}"
+    instance_profile_name: management
+    security_groups:
+      - "{{ sg_management.group_id }}"
+      - "{{ sg_ssh.group_id }}"
+    instance_type: m4.large
+    volumes:
+    - device_name: /dev/sda1
+      volume_size: 8
+      volume_type: gp2
+      delete_on_termination: true
+    - device_name: /dev/sdb
+      ephemeral: ephemeral0
+    - device_name: /dev/sdc
+      ephemeral: ephemeral1
+    - device_name: /dev/sdd
+      ephemeral: ephemeral2
+    - device_name: /dev/sde
+      ephemeral: ephemeral3
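+    # 8 GB gp2 root plus instance-store mappings sdb-sde (used where the
+    # instance type actually provides instance store).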
+  register: mgmt_lc
+
+- debug:
+    var: mgmt_lc
+
+- name: management asg
+  ec2_asg:
+    region: "{{ vpc_region }}"
+    name: management
+    min_size: 1
+    max_size: 1
+    desired_capacity: 1
+    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
+    launch_config_name: "{{ mgmt_lc.name }}"
+    notification_topic: "{{ management_topic.sns_arn }}"
+    notification_types:
+      - autoscaling:EC2_INSTANCE_LAUNCH
+    load_balancers:
+    - management-int-elb
+    tags:
+    - module: management
+      propagate_at_launch: yes
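+# A min/max/desired of 1 keeps exactly one management node that the ASG
+# replaces automatically; launch events go to the SNS topic registered above.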