---
- assert:
    that:
      - MANAGEMENT_SUBNET != ''
      - DEFAULT_AMI != ''
  tags: ['check_vars']

- name: sg ssh
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: ssh
    description: "allow ssh from anywhere"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0
  register: sg_ssh

- name: sg management-elb
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0

- name: sg management
  ec2_group:
    vpc_id: "{{ vpc.vpc.id }}"
    region: "{{ vpc_region }}"
    state: present
    name: management
    description: "sg for management"
    purge_rules: false
    rules:
      - proto: all
        group_name: management
      - proto: all
        group_name: management-elb
  register: sg_management

- name: elb management-int-elb
  ec2_elb_lb:
    region: "{{ vpc_region }}"
    state: present
    name: management-int-elb
    cross_az_load_balancing: yes
    scheme: internal
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2

- name: management key
  ec2_key:
    region: "{{ vpc_region }}"
    name: "{{ MANAGEMENT_KEY_NAME }}"
    key_material: "{{ item }}"
  with_file: keys/{{ MANAGEMENT_KEY_NAME }}.pub

- name: management iam
  iam:
    name: management
    iam_type: role
    state: present

# this is only ansible 2.3+
# - name: management role policies
#   iam_role:
#     name: management
#     state: present
#     managed_policy:
#       - arn:aws:iam::{{ ACCT_ID }}:policy/base-policy
#       - arn:aws:iam::{{ ACCT_ID }}:policy/management-policy

# will need to rev name-version when changing AMI
- name: management lc
  ec2_lc:
    region: "{{ vpc_region }}"
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: "{{ MANAGEMENT_KEY_NAME }}"
    instance_profile_name: management
    security_groups:
      - "{{ sg_management.group_id }}"
      - "{{ sg_ssh.group_id }}"
    instance_type: m4.large
    volumes:
      # setting the root volume seems to prevent instances from launching
      # - device_name: /dev/sda1
      #   volume_size: 8
      #   volume_type: gp2
      #   delete_on_termination: true
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc

- name: management asg
  ec2_asg:
    region: "{{ vpc_region }}"
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    default_cooldown: 10
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    launch_config_name: "{{ mgmt_lc.name|default('checkmode') }}"
    notification_topic: "{{ management_topic.sns_arn }}"
    notification_types:
      - autoscaling:EC2_INSTANCE_LAUNCH
    load_balancers:
      - management-int-elb
    tags:
      - account: "{{ ACCT_NAME }}"
        propagate_at_launch: yes
      - module: management
        propagate_at_launch: yes
      - stack: ""
        propagate_at_launch: yes
      - country: ""
        propagate_at_launch: yes
      - phase: dev
        propagate_at_launch: yes

- name: not implemented yet
  debug:
    msg: |
      attach policies to iam role
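
# A minimal sketch of one way to close the "attach policies to iam role"
# gap before ansible 2.3, using an inline policy via the iam_policy
# module instead of the managed_policy support commented out above.
# The policy file path policies/management-policy.json is an assumption,
# not something defined in this repo:
#
# - name: management role inline policy
#   iam_policy:
#     iam_type: role
#     iam_name: management
#     policy_name: management-policy
#     policy_document: policies/management-policy.json
#     state: present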