finish fixing common
[awsible] / roles / aws-infrastructure / tasks / main.yml
---
- assert:
    that:
      - MANAGEMENT_EVENT_FAILURE_QUEUE != ''
      - MANAGEMENT_EVENT_QUEUE != ''
      - MANAGEMENT_SUBNET != ''
      - DEFAULT_AMI != ''
  tags: ['check_vars']

- name: Management failure queue.
  sqs_queue:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 1209600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 0
  register: management_failure_queue

# as of Ansible 2.2.1.0 sqs_queue does not seem to be returning queue_arn

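# A possible workaround (sketch, not wired in below): build the ARN once from
# the account id and queue name with set_fact, then reference the fact instead
# of repeating the hand-built string. Assumes ACCT_ID is the bare 12-digit id.
# - set_fact:
#     management_failure_queue_arn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
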
- name: Management queue.
  sqs_queue:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 345600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 20
    redrive_policy:
      maxReceiveCount: 5
      deadLetterTargetArn: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
      # deadLetterTargetArn: "{{ management_failure_queue.queue_arn }}"
  register: management_queue

- name: Management topic and subscription.
  sns_topic:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    display_name: "{{ MANAGEMENT_EVENT_QUEUE_SHORT }}"
    purge_subscriptions: False
    subscriptions:
      # - endpoint: "{{ management_queue.queue_arn }}"
      - endpoint: "arn:aws:sqs:{{ vpc_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
        protocol: "sqs"
  register: management_topic

- name: Management notice topic
  sns_topic:
    state: present
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_NOTICE_TOPIC }}"
    display_name: "{{ MANAGEMENT_NOTICE_TOPIC_SHORT }}"
    purge_subscriptions: False
  register: management_notice_topic

- name: Management backup bucket
  when: MANAGEMENT_BACKUP_S3_BUCKET is defined
  s3_bucket:
    state: present
    name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"

- name: sg ssh
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: ssh
    description: "allow ssh from anywhere"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0
  register: sg_ssh

- name: sg management-elb
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0

- name: sg management
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management
    description: "sg for management"
    purge_rules: false
    rules:
      - proto: all
        group_name: management
      - proto: all
        group_name: management-elb
  register: sg_management

- name: elb management-int-elb
  ec2_elb_lb:
    region: "{{ vpc_region }}"
    state: present
    name: management-int-elb
    cross_az_load_balancing: yes
    scheme: internal
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2

- name: management key
  ec2_key:
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_KEY_NAME }}"
    key_material: "{{ item }}"
  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub

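# The public key is read from the local repo by with_file above; one way it
# might be generated (sketch, key type and size are assumptions):
#   ssh-keygen -t rsa -b 4096 -N '' -f keys/<MANAGEMENT_KEY_NAME>
# which writes keys/<MANAGEMENT_KEY_NAME>.pub alongside the private key.
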
- name: management iam
  iam:
    name: management
    iam_type: role
    state: present

# this is only ansible 2.3+
# - name: management role policies
#   iam_role:
#     name: management
#     state: present
#     managed_policy:
#       - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
#       - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy

# will need to rev name-version when changing AMI
- name: management lc
  ec2_lc:
    region: "{{ vpc_region }}"
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: "{{ MANAGEMENT_KEY_NAME }}"
    instance_profile_name: management
    security_groups:
      - "{{ sg_management.group_id }}"
      - "{{ sg_ssh.group_id }}"
    instance_type: m4.large
    volumes:
      # setting the root volume seems to prevent instances from launching
      # - device_name: /dev/sda1
      #   volume_size: 8
      #   volume_type: gp2
      #   delete_on_termination: true
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc

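# A possible way to make that rev a one-variable change (sketch; lc_serial is a
# hypothetical var, e.g. "0001"): derive the LC name from it, so bumping
# DEFAULT_AMI only requires bumping lc_serial.
#     name: "management-{{ lc_serial | default('0000') }}"
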
- name: management asg
  ec2_asg:
    region: "{{ vpc_region }}"
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    default_cooldown: 10
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    launch_config_name: "{{ mgmt_lc.name }}"
    notification_topic: "{{ management_topic.sns_arn }}"
    notification_types:
      - autoscaling:EC2_INSTANCE_LAUNCH
    load_balancers:
      - management-int-elb
    tags:
      - module: management
        propagate_at_launch: yes

- name: not implemented yet
  debug:
    msg: |
      attach policies to iam role
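
# One interim option on the Ansible 2.2 series (sketch, commented out; the
# policy file path is hypothetical): attach an inline policy to the role with
# iam_policy until the managed-policy attachment via iam_role (2.3+, commented
# above) can be used.
# - name: management role inline policy
#   iam_policy:
#     iam_type: role
#     iam_name: management
#     policy_name: management-inline
#     policy_document: policies/management-policy.json
#     state: present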