- action: ec2_facts

- name: Management queue.
  sqs_queue:
    state: present
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 345600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 20

- name: Management failure queue.
  sqs_queue:
    state: present
    name: "{{ MANAGEMENT_EVENT_FAILURE_QUEUE }}"
    default_visibility_timeout: 30
    message_retention_period: 1209600
    maximum_message_size: 262144
    delivery_delay: 0
    receive_message_wait_time: 0

- name: Management topic and subscription.
  sns_topic:
    state: present
    name: "{{ MANAGEMENT_EVENT_QUEUE }}"
    display_name: "management"
    purge_subscriptions: False
    subscriptions:
      - endpoint: "arn:aws:sqs:{{ ansible_ec2_placement_region }}:{{ ACCT_ID }}:{{ MANAGEMENT_EVENT_QUEUE }}"
        protocol: "sqs"

- name: Ensure management backup bucket exists.
  s3_bucket:
    state: present
    name: "{{ MANAGEMENT_BACKUP_S3_BUCKET }}"
  when: MANAGEMENT_BACKUP_S3_BUCKET is defined

- name: sg sshOnly
  ec2_group:
    state: present
    name: sshOnly
    description: "allow ssh from anywhere"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0

- name: sg management-elb
  ec2_group:
    state: present
    name: management-elb
    description: "sg for internal elb for monitoring management"
    purge_rules: false
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: 0.0.0.0/0
    rules_egress:
      - proto: all
        cidr_ip: 0.0.0.0/0

- name: sg management
  ec2_group:
    state: present
    name: management
    description: "sg for management"
    purge_rules: false
    rules:
      - proto: all
        group_name: management
      - proto: all
        group_name: management-elb

- name: elb management-int-elb
  ec2_elb_lb:
    state: present
    name: management-int-elb
    cross_az_load_balancing: yes
    scheme: internal
    subnets: "{{ MANAGEMENT_SUBNET }}"
    security_group_names:
      - management-elb
    listeners:
      - protocol: tcp
        load_balancer_port: 22
        instance_port: 22
    health_check:
      ping_protocol: tcp
      ping_port: 22
      response_timeout: 5
      interval: 30
      unhealthy_threshold: 2
      healthy_threshold: 2

- name: management iam
  iam:
    name: management
    iam_type: role
    state: present

- name: management lc
  ec2_lc:
    name: management-0000
    image_id: "{{ DEFAULT_AMI }}"
    key_name: management-key
    security_groups:
      - management
      - sshOnly
    instance_type: m4.large
    volumes:
      - device_name: /dev/sda1
        volume_size: 8
        volume_type: gp2
        delete_on_termination: true
      - device_name: /dev/sdb
        ephemeral: ephemeral0
      - device_name: /dev/sdc
        ephemeral: ephemeral1
      - device_name: /dev/sdd
        ephemeral: ephemeral2
      - device_name: /dev/sde
        ephemeral: ephemeral3
  register: mgmt_lc

- name: management asg
  ec2_asg:
    name: management
    min_size: 1
    max_size: 1
    desired_capacity: 1
    vpc_zone_identifier: "{{ MANAGEMENT_SUBNET }}"
    # Assumes the registered ec2_lc result exposes the launch configuration
    # name as `name`; alternatively reference "management-0000" directly.
    launch_config_name: "{{ mgmt_lc.name }}"
    tags:
      - module: management
        propagate_at_launch: yes
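
# For reference, a minimal sketch of the external variables this task list
# expects, shown with hypothetical values; the real values depend on your AWS
# account, VPC, and naming conventions, and are typically supplied via
# group_vars or --extra-vars rather than hardcoded here.
#
#   MANAGEMENT_EVENT_QUEUE: management-events
#   MANAGEMENT_EVENT_FAILURE_QUEUE: management-events-failure
#   ACCT_ID: "123456789012"
#   MANAGEMENT_BACKUP_S3_BUCKET: example-management-backup   # optional; task is skipped if undefined
#   MANAGEMENT_SUBNET: subnet-0abc1234
#   DEFAULT_AMI: ami-0abcdef1234567890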