assorted small fixes, add some management infrastructure
[awsible] / roles / aws-infrastructure / tasks / main.yml
---
# Gather EC2 instance metadata facts. Populates the ansible_ec2_* variables
# (e.g. ansible_ec2_placement_region, used by the SNS subscription task below).
# Modernized from the legacy `action: ec2_facts` form for consistency with the
# other tasks in this file, which all use the `module:` key syntax.
- ec2_facts:
2
# Primary management event queue (fix: "Managment" typo in the task name).
- name: Management queue.
  sqs_queue:
    state: present
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 345600    # 4 days, in seconds
    maximum_message_size: 262144        # 256 KiB — SQS maximum
    delivery_delay: 0
    receive_message_wait_time: 20       # long polling (20s is the SQS max)
12
# Dead-letter-style queue for failed management events. Retains messages
# longer than the primary queue so failures can be inspected after the fact.
- name: Management failure queue.
  sqs_queue:
    state: present
    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 1209600   # 14 days, in seconds — SQS maximum
    maximum_message_size: 262144        # 256 KiB — SQS maximum
    delivery_delay: 0
    receive_message_wait_time: 0        # short polling, unlike the primary queue
22
# SNS topic fanning management events into the management SQS queue.
# Canonicalized `False` -> `false` (YAML truthy best practice).
- name: Management topic and subscription.
  sns_topic:
    state: present
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    display_name: "management"
    purge_subscriptions: false          # keep subscriptions added outside this play
    subscriptions:
      # Queue ARN is assembled from the region fact gathered by ec2_facts.
      - endpoint: "arn:aws:sqs:{{ ansible_ec2_placement_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
        protocol: "sqs"
32
# Backup bucket, created only when the variable is set.
# Fixes: `when:` was nested among the module arguments (it is a task-level
# keyword), and `MANAGEMENT_BACKUP_S3_BUCKET|defined` used filter syntax —
# `defined` is a Jinja2 *test* and must be invoked as `is defined`.
- name: Ensure management backup bucket exists.
  s3_bucket:
    state: present
    name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
  when: MANAGEMENT_BACKUP_S3_BUCKET is defined
38
# Security group permitting inbound SSH from any address.
- name: sg ssh
  ec2_group:
    state: present
    name: sshOnly
    description: "allow ssh from anywhere"
    purge_rules: false                  # leave rules added outside this play intact
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all                      # unrestricted outbound
        cidr_ip: 0.0.0.0/0
53
# Security group for the internal management ELB.
# NOTE(review): inbound SSH is open to 0.0.0.0/0 even though the description
# says this is for an *internal* ELB — confirm whether this should be
# restricted to the VPC CIDR instead.
- name: sg management-elb
  ec2_group:
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false                  # leave rules added outside this play intact
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all                      # unrestricted outbound
        cidr_ip: 0.0.0.0/0
68
# Security group for management instances: allows all traffic from members of
# this same group and from the management ELB group (no CIDR-based ingress).
- name: sg management
  ec2_group:
    state: present
    name: management
    description: "sg for management"
    purge_rules: false                  # leave rules added outside this play intact
    rules:
      - proto: all
        group_name: management          # intra-group traffic
      - proto: all
        group_name: management-elb      # traffic from the internal ELB
80
# Internal ELB fronting SSH (port 22) on the management instances.
# Canonicalized `yes` -> `true` (YAML truthy best practice).
- name: elb management-int-elb
  ec2_elb_lb:
    state: present
    name: management-int-elb
    cross_az_load_balancing: true
    scheme: internal                    # not internet-facing
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      # TCP pass-through of SSH.
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp                # health = TCP connect on the SSH port
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2
101
# IAM role for the management instances (no inline policies attached here).
- name: management iam
  iam:
    name: management
    iam_type: role
    state: present
107
# Launch configuration for the management ASG. The result is registered so the
# ASG task that follows can reference the created launch configuration.
- name: management lc
  ec2_lc:
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: management-key
    security_groups:
      - management
      - sshOnly
    instance_type: m4.large
    volumes:
      # Root volume.
      - device_name: /dev/sda1
        volume_size: 8
        volume_type: gp2
        delete_on_termination: true
      # Instance-store (ephemeral) volumes.
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc
131
# Auto-scaling group pinned to exactly one management instance.
# Fixes: `mgmt_lc.something.name` was a leftover placeholder — the ec2_lc
# result exposes the launch configuration name as `mgmt_lc.name`;
# `propogate_at_launch` was a typo for `propagate_at_launch` (the misspelled
# key would be silently ignored, so instances never got the tag);
# `yes` canonicalized to `true`.
- name: management asg
  ec2_asg:
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    launch_config_name: "{{ mgmt_lc.name }}"
    tags:
      - module: management
        propagate_at_launch: true