split out management infrastructure role further
[awsible] / roles / aws-management-infrastructure / tasks / main.yml
---
- assert:
    that:
      - MANAGEMENT_SUBNET != ''
      - DEFAULT_AMI != ''
  tags: ['check_vars']

- name: sg ssh
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: ssh
    description: "allow ssh from anywhere"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0
  register: sg_ssh

- name: sg management-elb
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0

- name: sg management
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management
    description: "sg for management"
    purge_rules: false
    rules:
      - proto: all
        group_name: management
      - proto: all
        group_name: management-elb
  register: sg_management

- name: elb management-int-elb
  ec2_elb_lb:
    region: "{{ vpc_region }}"
    state: present
    name: management-int-elb
    cross_az_load_balancing: yes
    scheme: internal
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2

- name: management key
  ec2_key:
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_KEY_NAME }}"
    key_material: "{{ item }}"
  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub

- name: management iam
  iam:
    name: management
    iam_type: role
    state: present

# this is only ansible 2.3+
# - name: management role policies
#   iam_role:
#     name: management
#     state: present
#     managed_policy:
#       - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
#       - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy

# will need to rev name-version when changing AMI
- name: management lc
  ec2_lc:
    region: "{{ vpc_region }}"
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: "{{ MANAGEMENT_KEY_NAME }}"
    instance_profile_name: management
    security_groups:
      - "{{ sg_management.group_id }}"
      - "{{ sg_ssh.group_id }}"
    instance_type: m4.large
    volumes:
      # setting the root volume seems to prevent instances from launching
      # - device_name: /dev/sda1
      #   volume_size: 8
      #   volume_type: gp2
      #   delete_on_termination: true
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc

- name: management asg
  ec2_asg:
    region: "{{ vpc_region }}"
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    default_cooldown: 10
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    launch_config_name: "{{ mgmt_lc.name|default('checkmode') }}"
    notification_topic: "{{ management_topic.sns_arn }}"
    notification_types:
      - autoscaling:EC2_INSTANCE_LAUNCH
    load_balancers:
      - management-int-elb
    tags:
      - account: "{{ ACCT_NAME }}"
        propagate_at_launch: yes
      - module: management
        propagate_at_launch: yes
      - stack: ""
        propagate_at_launch: yes
      - country: ""
        propagate_at_launch: yes
      - phase: dev
        propagate_at_launch: yes

- name: not implemented yet
  debug:
    msg: |
      attach policies to iam role
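For that last step, until the ansible 2.3 iam_role module (commented out above) is usable, one possible stopgap is the iam_policy module that ships with ansible 2.x. It attaches an inline policy rather than the managed policies referenced above, so it is a different mechanism; the task below is only a sketch, and the policy name and document path are placeholders, not files from this repo.

# sketch only: inline policy via iam_policy (works pre-2.3) instead of
# attaching managed policies; policy_name and policy_document are placeholders
- name: management role inline policy
  iam_policy:
    iam_type: role
    iam_name: management
    policy_name: management-inline
    policy_document: policies/management-policy.json
    state: present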