further progress on infrastructure buildout
[awsible] / roles / aws-infrastructure / tasks / main.yml
---
- assert:
    that:
      - MANAGEMENT_EVENT_FAILURE_QUEUE != ''
      - MANAGEMENT_EVENT_QUEUE != ''
      - MANAGEMENT_SUBNET != ''
      - DEFAULT_AMI != ''
  tags: ['check_vars']
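# Dead-letter queue for management events. Retention is 1209600 s (14 days,
# the SQS maximum) so failed events stay around long enough to inspect;
# 262144 bytes is the SQS maximum message size.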
- name: Management failure queue.
  sqs_queue:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 1209600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 0
  register: management_failure_queue

- debug:
    var: management_failure_queue
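# Primary event queue: 4-day retention, long polling (a
# receive_message_wait_time of 20 s is the SQS maximum), and a redrive policy
# that moves a message to the failure queue after 5 unsuccessful receives.
# The DLQ ARN is assembled from ACCT_ID rather than taken from the registered
# result; the commented line below preserves that alternative.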
- name: Management queue.
  sqs_queue:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 345600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 20
    redrive_policy:
      maxReceiveCount: 5
      deadLetterTargetArn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
      # deadLetterTargetArn: "{{ management_failure_queue.queue_arn }}"
  register: management_queue
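# SNS topic sharing the queue's name, with the queue subscribed to it. Note
# that SQS only accepts the topic's messages once the queue policy allows the
# topic to SendMessage; that policy is not managed by this task.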
- name: Management topic and subscription.
  sns_topic:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    display_name: "{{ MANAGEMENT_EVENT_QUEUE_SHORT }}"
    purge_subscriptions: False
    subscriptions:
      # - endpoint: "{{ management_queue.queue_arn }}"
      - endpoint: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
        protocol: "sqs"
  register: management_topic
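# Human-facing notice topic. purge_subscriptions: False leaves any
# hand-added subscriptions (e.g. email endpoints) in place between runs.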
- name: Management notice topic
  sns_topic:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_NOTICE_TOPIC }}"
    display_name: "{{ MANAGEMENT_NOTICE_TOPIC_SHORT }}"
    purge_subscriptions: False
  register: management_notice_topic

# - name: Ensure management backup bucket exists.
#   s3_bucket:
#     state: present
#     name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
#   when: MANAGEMENT_BACKUP_S3_BUCKET is defined
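# Security groups. purge_rules: false means rules added outside Ansible are
# left alone rather than stripped on the next run.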
- name: sg ssh
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: ssh
    description: "allow ssh from anywhere"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0
  register: sg_ssh
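# ELB-facing group; referenced by name in the ELB task below
# (security_group_names), so the result is not registered.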
- name: sg management-elb
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0
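# Management instances accept all traffic from each other and from the ELB.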
- name: sg management
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management
    description: "sg for management"
    purge_rules: false
    rules:
      - proto: all
        group_name: management
      - proto: all
        group_name: management-elb
  register: sg_management

- debug:
    var: sg_management
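# Internal TCP:22 ELB: gives the (replaceable) management instance a stable
# SSH endpoint. The health check is a plain TCP connect to port 22.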
- name: elb management-int-elb
  ec2_elb_lb:
    region: "{{ vpc_region }}"
    state: present
    name: management-int-elb
    cross_az_load_balancing: yes
    scheme: internal
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2
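# Import the management public key; with_file reads
# keys/{{ MANAGEMENT_KEY_NAME }}.pub and exposes its contents as item.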
- name: management key
  ec2_key:
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_KEY_NAME }}"
    key_material: "{{ item }}"
  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub
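# Creating a role with the legacy iam module also creates a matching
# instance profile, which the launch configuration below references via
# instance_profile_name.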
- name: management iam
  iam:
    name: management
    iam_type: role
    state: present
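# Launch configuration. Note: m4 instances are EBS-only, so the ephemeral
# mappings below are inert placeholders unless the instance type changes to
# one with instance-store volumes.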
- name: management lc
  ec2_lc:
    region: "{{ vpc_region }}"
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: "{{ MANAGEMENT_KEY_NAME }}"
    instance_profile_name: management
    security_groups:
      - "{{ sg_management.group_id }}"
      - "{{ sg_ssh.group_id }}"
    instance_type: m4.large
    volumes:
      - device_name: /dev/sda1
        volume_size: 8
        volume_type: gp2
        delete_on_termination: true
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc

- debug:
    var: mgmt_lc
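# min = max = desired = 1: a single self-healing instance. Launch events are
# published to the management topic and flow into the event queue above.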
- name: management asg
  ec2_asg:
    region: "{{ vpc_region }}"
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    launch_config_name: "{{ mgmt_lc.name }}"
    notification_topic: "{{ management_topic.sns_arn }}"
    notification_types:
      - autoscaling:EC2_INSTANCE_LAUNCH
    load_balancers:
      - management-int-elb
    tags:
      - module: management
        propagate_at_launch: yes
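For reference, these tasks assume the asserted variables are defined somewhere in the inventory (group_vars, extra-vars, or similar), and that an earlier task registered vpc (for vpc.vpc.id). A minimal sketch of such a vars file, with a hypothetical path and purely illustrative values:

[awsible] / group_vars / all.yml (hypothetical; values illustrative)
---
vpc_region: us-east-1
ACCT_ID: "123456789012"
DEFAULT_AMI: ami-00000000
MANAGEMENT_SUBNET: subnet-00000000
MANAGEMENT_KEY_NAME: management
MANAGEMENT_EVENT_QUEUE: management-events
MANAGEMENT_EVENT_QUEUE_SHORT: mgmt-events
MANAGEMENT_EVENT_FAILURE_QUEUE: management-events-failed
MANAGEMENT_NOTICE_TOPIC: management-notices
MANAGEMENT_NOTICE_TOPIC_SHORT: mgmt-notices

Running the play with --tags check_vars exercises just the assert, which is a cheap way to confirm the variables are wired up before touching AWS.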