further progress on infrastructure buildout
author    Justin Wind <j.wind@partner.samsung.com>
          Thu, 9 Mar 2017 00:12:33 +0000 (16:12 -0800)
committer Justin Wind <j.wind@partner.samsung.com>
          Thu, 9 Mar 2017 00:12:33 +0000 (16:12 -0800)
.gitignore [new file with mode: 0644]
ansible.cfg
roles/aws-infrastructure/defaults/main.yml [new file with mode: 0644]
roles/aws-infrastructure/meta/main.yml [new file with mode: 0644]
roles/aws-infrastructure/tasks/main.yml
roles/aws-vpc-infrastructure/meta/main.yml [new file with mode: 0644]
roles/aws-vpc-infrastructure/tasks/main.yml [new file with mode: 0644]
roles/aws-vpc/tasks/main.yml

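For context, a minimal playbook sketch (hypothetical, not part of this commit) showing how the reworked roles might be applied; both infrastructure roles pull in aws-vpc through the meta dependencies added below:

---
# site.yml (hypothetical) -- the ec2_* modules call the AWS API, so the roles run against localhost
- hosts: localhost
  connection: local
  gather_facts: no
  roles:
    - aws-vpc-infrastructure   # meta dependency on aws-vpc registers the vpc fact these tasks use
    - aws-infrastructure       # likewise depends on aws-vpc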
diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..81bbbae
--- /dev/null
@@ -0,0 +1 @@
+keys/
diff --git a/ansible.cfg b/ansible.cfg
index a48806db11782bfbe1268e2c15c7a6158a936662..c6ee886c3e0a371a8d3c3625165208608a52e4c4 100644 (file)
@@ -1,4 +1,5 @@
 [defaults]
+retry_files_enabled = False
 host_key_checking = False
 inventory = inventory
 remote_user = ec2-user
diff --git a/roles/aws-infrastructure/defaults/main.yml b/roles/aws-infrastructure/defaults/main.yml
new file mode 100644 (file)
index 0000000..1830f02
--- /dev/null
@@ -0,0 +1,7 @@
+---
+MANAGEMENT_EVENT_QUEUE: management-events
+MANAGEMENT_EVENT_QUEUE_SHORT: management
+MANAGEMENT_EVENT_FAILURE_QUEUE: "{{ MANAGEMENT_EVENT_QUEUE }}-failed"
+MANAGEMENT_NOTICE_TOPIC: management-notifications
+MANAGEMENT_NOTICE_TOPIC_SHORT: notices
+MANAGEMENT_KEY_NAME: management
\ No newline at end of file
diff --git a/roles/aws-infrastructure/meta/main.yml b/roles/aws-infrastructure/meta/main.yml
new file mode 100644 (file)
index 0000000..96ecf5e
--- /dev/null
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: aws-vpc }
\ No newline at end of file
diff --git a/roles/aws-infrastructure/tasks/main.yml b/roles/aws-infrastructure/tasks/main.yml
index 0f1a39073aa713ed1db8ee5a59f8d79be2bd2416..b4663f5ca1fd4fe256d862494107431bd94faa66 100644 (file)
-- action: ec2_facts
+---
+- assert:
+    that:
+    - MANAGEMENT_EVENT_FAILURE_QUEUE != ''
+    - MANAGEMENT_EVENT_QUEUE != ''
+    - MANAGEMENT_SUBNET != ''
+    - DEFAULT_AMI != ''
+  tags: ['check_vars']
+
+- name: Management failure queue.
+  sqs_queue:
+    state: present
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
+    default_visibility_timeout: 30
+    message_retention_period: 1209600
+    maximum_message_size: 262144
+    delivery_delay: 0
+    receive_message_wait_time: 0
+  register: management_failure_queue
+
+- debug:
+    var: management_failure_queue
 
 - name: Management queue.
   sqs_queue:
     state: present
+    region: "{{ vpc_region }}"
     name: "{{ MANAGEMENT_EVENT_QUEUE }}"
     default_visibility_timeout: 30
     message_retention_period: 345600
     maximum_message_size: 262144
     delivery_delay: 0
     receive_message_wait_time: 20
-
-- name: Management failure queue.
-    sqs_queue:
-      state: present
-      name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
-      default_visibility_timeout: 30
-      message_retention_period: 1209600
-      maximum_message_size: 262144
-      delivery_delay: 0
-      receive_message_wait_time: 0
+    redrive_policy:
+      maxReceiveCount: 5
+      deadLetterTargetArn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
+#      deadLetterTargetArn: "{{ management_failure_queue.queue_arn }}"
+  register: management_queue
 
 - name: Management topic and subscription.
-    sns_topic:
-      state: present
-      name: "{{ MANAGEMENT_EVENT_QUEUE }}"
-      display_name: "management"
-      purge_subscriptions: False
-      subscriptions:
-      - endpoint: "arn:aws:sqs:{{ ansible_ec2_placement_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
-        protocol: "sqs"
-
-- name: Ensure management backup bucket exists.
-  s3_bucket:
+  sns_topic:
     state: present
-    when: MANAGEMENT_BACKUP_S3_BUCKET|defined
-    name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
+    display_name: "{{ MANAGEMENT_EVENT_QUEUE_SHORT }}"
+    purge_subscriptions: False
+    subscriptions:
+#   - endpoint: "{{ management_queue.queue_arn }}"
+    - endpoint: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
+      protocol: "sqs"
+  register: management_topic
+
+- name: Management notice topic
+  sns_topic:
+    state: present
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_NOTICE_TOPIC }}"
+    display_name: "{{ MANAGEMENT_NOTICE_TOPIC_SHORT }}"
+    purge_subscriptions: False
+  register: management_notice_topic
+
+# - name: Ensure management backup bucket exists.
+#   s3_bucket:
+#     when: MANAGEMENT_BACKUP_S3_BUCKET is defined
+#     state: present
+#     name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
 
 - name: sg ssh
-    ec2_group:
-      state: present
-      name: sshOnly
-      description: "allow ssh from anywhere"
-      purge_rules: false
-      rules:
-      - proto: tcp
-        from_port: 22
-        to_port: 22
-        cidr_ip: 0.0.0.0/0
-      rules_egress:
-      - proto: all
-        cidr_ip: 0.0.0.0/0
-
-  - name: sg management-elb
-    ec2_group:
-      state: present
-      name: management-elb
-      description: "sg for internal elb for monitoring management"
-      purge_rules: false
-      rules:
-      - proto: tcp
-        from_port: 22
-        to_port: 22
-        cidr_ip: 0.0.0.0/0
-      rules_egress:
-      - proto: all
-        cidr_ip: 0.0.0.0/0
-
-  - name: sg management
-    ec2_group:
-      state: present
-      name: management
-      description: "sg for management"
-      purge_rules: false
-      rules:
-      - proto: all
-        group_name: management
-      - proto: all
-        group_name: management-elb
-
-  - name: elb management-int-elb
-    ec2_elb_lb:
-      state: present
-      name: management-int-elb
-      cross_az_load_balancing: yes
-      scheme: internal
-      subnets: "{{ MANAGEMENT_SUBNET }}"
-      security_group_names:
-      - management-elb
-      listeners:
-      - protocol: tcp
-        load_balancer_port: 22
-        instance_port: 22
-      health_check:
-        ping_protocol: tcp
-        ping_port: 22
-        response_timeout: 5
-        interval: 30
-        unhealthy_threshold: 2
-        healthy_threshold: 2
-
-  - name: management iam
-    iam:
-      name: management
-      iam_type: role
-      state: present
-
-  - name: management lc
-    ec2_lc:
-      name: management-0000
-      image_id: "{{ DEFAULT_AMI }}"
-      key_name: management-key
-      security_groups:
-      - management
-      - sshOnly
-      instance_type: m4.large
-      volumes:
-      - device_name: /dev/sda1
-        volume_size: 8
-        volume_type: gp2
-        delete_on_termination: true
-      - device_name: /dev/sdb
-        ephemeral: ephemeral0
-      - device_name: /dev/sdc
-        ephemeral: ephemeral1
-      - device_name: /dev/sdd
-        ephemeral: ephemeral2
-      - device_name: /dev/sde
-        ephemeral: ephemeral3
-    register: mgmt_lc
-
-  - name: management asg
-    ec2_asg:
-      name: management
-      min_size: 1
-      max_size: 1
-      desired_capacity: 1
-      vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
-      launch_config_name: "{{ mgmt_lc.something.name }}"
-      tags:
-      - module: management
-        propogate_at_launch: yes
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: ssh
+    description: "allow ssh from anywhere"
+    purge_rules: false
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+  register: sg_ssh
+
+- name: sg management-elb
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: management-elb
+    description: "sg for internal elb for monitoring management"
+    purge_rules: false
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    rules_egress:
+    - proto: all
+      cidr_ip: 0.0.0.0/0
+
+- name: sg management
+  ec2_group:
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    state: present
+    name: management
+    description: "sg for management"
+    purge_rules: false
+    rules:
+    - proto: all
+      group_name: management
+    - proto: all
+      group_name: management-elb
+  register: sg_management
+
+- debug:
+    var: sg_management
+
+- name: elb management-int-elb
+  ec2_elb_lb:
+    region: "{{ vpc_region }}"
+    state: present
+    name: management-int-elb
+    cross_az_load_balancing: yes
+    scheme: internal
+    subnets: "{{ MANAGEMENT_SUBNET }}"
+    security_group_names:
+    - management-elb
+    listeners:
+    - protocol: tcp
+      load_balancer_port: 22
+      instance_port: 22
+    health_check:
+      ping_protocol: tcp
+      ping_port: 22
+      response_timeout: 5
+      interval: 30
+      unhealthy_threshold: 2
+      healthy_threshold: 2
+
+- name: management key
+  ec2_key:
+    region: "{{ vpc_region }}"
+    name: "{{ MANAGEMENT_KEY_NAME }}"
+    key_material: "{{ item }}"
+  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub
+
+- name: management iam
+  iam:
+    name: management
+    iam_type: role
+    state: present
+
+- name: management lc
+  ec2_lc:
+    region: "{{ vpc_region }}"
+    name: management-0000
+    image_id: "{{ DEFAULT_AMI }}"
+    key_name: "{{ MANAGEMENT_KEY_NAME }}"
+    instance_profile_name: management
+    security_groups:
+      - "{{ sg_management.group_id }}"
+      - "{{ sg_ssh.group_id }}"
+    instance_type: m4.large
+    volumes:
+    - device_name: /dev/sda1
+      volume_size: 8
+      volume_type: gp2
+      delete_on_termination: true
+    - device_name: /dev/sdb
+      ephemeral: ephemeral0
+    - device_name: /dev/sdc
+      ephemeral: ephemeral1
+    - device_name: /dev/sdd
+      ephemeral: ephemeral2
+    - device_name: /dev/sde
+      ephemeral: ephemeral3
+  register: mgmt_lc
+
+- debug:
+    var: mgmt_lc
+
+- name: management asg
+  ec2_asg:
+    region: "{{ vpc_region }}"
+    name: management
+    min_size: 1
+    max_size: 1
+    desired_capacity: 1
+    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
+    launch_config_name: "{{ mgmt_lc.name }}"
+    notification_topic: "{{ management_topic.sns_arn }}"
+    notification_types:
+      - autoscaling:EC2_INSTANCE_LAUNCH
+    load_balancers:
+    - management-int-elb
+    tags:
+    - module: management
+      propagate_at_launch: yes
 
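The assert at the top of roles/aws-infrastructure/tasks/main.yml expects several variables that the role defaults do not define; a sketch of what group_vars might supply (all values are placeholders, not part of this commit):

---
# group_vars/all.yml (hypothetical values only)
ACCT_ID: "123456789012"               # AWS account id, used to build the SQS dead-letter and SNS subscription ARNs
DEFAULT_AMI: ami-0123456789abcdef0    # image id for the management launch configuration
MANAGEMENT_SUBNET: subnet-0abc1234    # subnet id used by the internal ELB and the management ASG
# vpc_region, vpc_name, and vpc_cidr are expected to come from the aws-vpc role's own vars.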
diff --git a/roles/aws-vpc-infrastructure/meta/main.yml b/roles/aws-vpc-infrastructure/meta/main.yml
new file mode 100644 (file)
index 0000000..96ecf5e
--- /dev/null
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: aws-vpc }
\ No newline at end of file
diff --git a/roles/aws-vpc-infrastructure/tasks/main.yml b/roles/aws-vpc-infrastructure/tasks/main.yml
new file mode 100644 (file)
index 0000000..03923fe
--- /dev/null
@@ -0,0 +1,71 @@
+---
+- assert:
+    that:
+    - vpc_region != ''
+    - vpc_subnets_pub != ''
+    - vpc_subnets_priv != ''
+  tags: ['check_vars']
+
+- name: IGW
+  ec2_vpc_igw:
+    state: present
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+  register: igw
+
+- name: Name IGW
+  ec2_tag:
+    state: present
+    resource: "{{ igw.gateway_id }}"
+    region: "{{ vpc_region }}"
+    tags:
+      Name: "igw-{{ vpc_region }}"
+
+- name: Subnets
+  with_items: "{{ vpc_subnets_pub + vpc_subnets_priv }}"
+  ec2_vpc_subnet:
+    state: present
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    cidr: "{{ item.cidr }}"
+    az: "{{ item.az }}"
+    tags: "{{ item.resource_tags }}"
+
+- name: Access/NAT EIP
+  ec2_eip:
+    in_vpc: yes
+    region: "{{ vpc_region }}"
+    reuse_existing_ip_allowed: yes
+  register: access_eip
+
+- name: Private route table
+  ec2_vpc_route_table:
+    state: present
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    tags:
+      Name: "Private-Routes-vpc-{{ vpc_region }}"
+      zone: priv
+      managed: 'yes'
+    subnets: "{{ vpc_subnets_priv|map(attribute='cidr')|list }}"
+  register: private_rt
+
+- name: Public route table
+  ec2_vpc_route_table:
+    state: present
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ vpc_region }}"
+    tags:
+      Name: "Public-Routes-vpc-{{ vpc_region }}"
+      zone: pub
+      managed: 'no'
+    subnets: "{{ vpc_subnets_pub|map(attribute='cidr')|list }}"
+    routes:
+      - dest: 0.0.0.0/0
+        gateway_id: "{{ igw.gateway_id }}"
+  register: public_rt
+
+- name: not implemented yet
+  debug:
+    msg: |
+      Change pub-subnets to auto-assign external IPs
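The Subnets task above iterates items that must each carry cidr, az, and resource_tags; a sketch of matching variable definitions (placeholder values, not from this commit):

---
# e.g. group_vars or aws-vpc vars (hypothetical)
vpc_subnets_pub:
  - cidr: 10.0.0.0/24
    az: us-west-2a
    resource_tags: { Name: pub-a, zone: pub }
vpc_subnets_priv:
  - cidr: 10.0.10.0/24
    az: us-west-2a
    resource_tags: { Name: priv-a, zone: priv }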
diff --git a/roles/aws-vpc/tasks/main.yml b/roles/aws-vpc/tasks/main.yml
index 26a75ae8027c609b6a2f27ebbc47cc9bbe5569b4..e92a53aeafc513da275623eff79089de8485a154 100644 (file)
@@ -1,72 +1,15 @@
 ---
+- assert:
+    that:
+    - vpc_name != ''
+    - vpc_cidr != ''
+    - vpc_region != ''
+  tags: ['check_vars']
+
 - name: VPC
   ec2_vpc_net:
     state: present
     name: "{{ vpc_name }}"
     cidr_block: "{{ vpc_cidr }}"
     region: "{{ vpc_region }}"
-  register: vpc
-
-- name: IGW
-  ec2_vpc_igw:
-    state: present
-    vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ vpc_region }}"
-  register: igw
-
-- name: Name IGW
-  ec2_tag:
-    state: present
-    resource: "{{ igw.gateway_id }}"
-    region: "{{ vpc_region }}"
-    tags:
-      Name: "igw-{{ vpc_region }}"
-
-- name: Subnets
-  with_items: "{{ subnets_pub + subnets_priv }}"
-  ec2_vpc_subnet:
-    state: present
-    vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ vpc_region }}"
-    cidr: "{{ item.cidr }}"
-    az: "{{ item.az }}"
-    tags: "{{ item.resource_tags }}"
-
-- name: Access/NAT EIP
-  ec2_eip:
-    in_vpc: yes
-    region: "{{ vpc_region }}"
-    reuse_existing_ip_allowed: yes
-  register: access_eip
-
-- name: Private route table
-  ec2_vpc_route_table:
-    state: present
-    vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ vpc_region }}"
-    tags:
-      Name: "Private-Routes-vpc-{{ vpc_region }}"
-      zone: priv
-      managed: 'yes'
-    subnets: "{{ subnets_priv|map(attribute='cidr')|list }}"
-  register: private_rt
-
-- name: Public route table
-  ec2_vpc_route_table:
-    state: present
-    vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ vpc_region }}"
-    tags:
-      Name: "Public-Routes-vpc-{{ vpc_region }}"
-      zone: pub
-      managed: 'no'
-    subnets: "{{ subnets_pub|map(attribute='cidr')|list }}"
-    routes:
-      - dest: 0.0.0.0/0
-        gateway_id: "{{ igw.gateway_id }}"
-  register: public_rt
-
-- name: not implemented yet
-  debug:
-    msg: |
-      Change pub-subnets to auto-assign external IPs
+  register: vpc
\ No newline at end of file
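The management key task imports keys/{{ MANAGEMENT_KEY_NAME }}.pub, and the new .gitignore keeps keys/ out of the repository; one way the key pair might be generated locally (a sketch, not part of this commit):

---
# keygen.yml (hypothetical helper playbook)
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Generate the management key pair if it does not already exist
      command: ssh-keygen -t rsa -b 4096 -N "" -C management -f keys/management
      args:
        creates: keys/management.pub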