admin管理员组文章数量:1391947
I'm using Terraform to deploy a simple architecture on AWS:
- 3 auto-scaling EC2 instances (ASG) in a private subnet and install nginx on the servers
- ALB in a public subnet
- Public subnet and private subnet are in different availability zones.
- EC2 servers to download content from a private S3 bucket and serve it using nginx
- Restrict inbound access to ALB and server fleet to port 80/TCP.
I'm getting a 502 Bad Gateway Error on the ALB DNS and the target group instances are failing health checks.
This is the latest version of the main.tf file.
provider "aws" {
region = "ap-southeast-1"
}
data "aws_vpc" "default" {
default = true
}
data "aws_subnets" "public" {
filter {
name = "vpc-id"
values = [data.aws_vpc.default.id]
}
}
resource "aws_subnet" "private_subnet" {
vpc_id = data.aws_vpc.default.id
cidr_block = "172.31.32.0/20"
availability_zone = "ap-southeast-1b"
}
# Elastic IP for NAT Gateway
resource "aws_eip" "nat_eip" {
domain = "vpc"
}
# NAT Gateway in the public subnet
resource "aws_nat_gateway" "nat_gateway" {
allocation_id = aws_eip.nat_eip.id
subnet_id = data.aws_subnets.public.id
}
# Routing table for the private subnet
resource "aws_route_table" "private_route_table" {
vpc_id = data.aws_vpc.default.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_nat_gateway.nat_gateway.id
}
}
# Associate the routing table with the private subnet
resource "aws_route_table_association" "private_route_association" {
subnet_id = aws_subnet.private_subnet.id
route_table_id = aws_route_table.private_route_table.id
}
resource "aws_security_group" "alb_sg" {
vpc_id = data.aws_vpc.default.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "ec2_sg" {
vpc_id = data.aws_vpc.default.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
security_groups = [aws_security_group.alb_sg.id]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# IAM Role for EC2 to access S3
resource "aws_iam_role" "ec2_s3_role" {
name = "EC2_S3_Access"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [{
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws"
}
Action = "sts:AssumeRole"
}]
})
}
resource "aws_iam_policy" "s3_read_policy" {
name = "S3ReadAccess"
description = "Allows EC2 to read from S3 bucket"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = "s3:GetObject"
Resource = "arn:aws:s3:::cherong-bucket/*"
}
]
})
}
resource "aws_iam_role_policy_attachment" "ec2_s3_policy" {
role = aws_iam_role.ec2_s3_role.name
policy_arn = aws_iam_policy.s3_read_policy.arn
}
resource "aws_iam_instance_profile" "ec2_s3_profile" {
name = "ec2-s3-profile"
role = aws_iam_role.ec2_s3_role.name
}
# Launch Template
resource "aws_launch_template" "sever_fleet_a" {
name = "server-fleet-a"
image_id = "ami-0599cde8e4a7ca305"
instance_type = "t2.micro"
iam_instance_profile {
name = aws_iam_instance_profile.ec2_s3_profile.name
}
network_interfaces {
associate_public_ip_address = false
security_groups = [aws_security_group.ec2_sg.id]
}
user_data = base64encode(<<EOF
#!/bin/bash
sudo yum update -y
sudo yum install -y nginx awscli
aws s3 cp --recursive s3://cherong-bucket/webapp/index.html /usr/share/nginx/html/index.html
sudo systemctl start nginx
sudo systemctl enable nginx
EOF
)
}
resource "aws_autoscaling_group" "asg" {
vpc_zone_identifier = [aws_subnet.private_subnet.id]
desired_capacity = 3
min_size = 3
max_size = 3
health_check_type = "ELB"
launch_template {
id = aws_launch_template.sever_fleet_a.id
version = "$Latest"
}
}
resource "aws_lb" "alb" {
name = "public-alb"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.alb_sg.id]
subnets = data.aws_subnets.public.ids
}
resource "aws_lb_target_group" "tg" {
name = "tg-server-fleet-a"
port = 80
protocol = "HTTP"
vpc_id = data.aws_vpc.default.id
health_check {
path = "/"
matcher = "200"
healthy_threshold = 3
unhealthy_threshold = 3
timeout = 5
interval = 30
}
}
resource "aws_lb_listener" "listener" {
load_balancer_arn = aws_lb.alb.arn
port = 80
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.tg.arn
}
}
resource "aws_autoscaling_attachment" "asg_attachment" {
autoscaling_group_name = aws_autoscaling_group.asg.id
lb_target_group_arn = aws_lb_target_group.tg.arn
}
output "alb_dns_name" {
value = aws_lb.alb.dns_name
description = " The domain name of the load balancer"
}
Initially I did not add a routing table for the private subnet to point to a NAT gateway. It resulted in the same 502 Bad Gateway error. I read that the 502 Bad Gateway error means the ALB cannot communicate with the EC2 instances. Is there a problem with the configuration of my ALB and ASG security groups?
本文标签:
版权声明:本文标题:amazon ec2 - Terraform deployment - 502 Bad Gateway on ALB DNS; Target group EC2 instances failed health checks - Stack Overflow 内容由网友自发贡献,该文观点仅代表作者本人, 转载请联系作者并注明出处:http://www.betaflare.com/web/1744689496a2619906.html, 本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌抄袭侵权/违法违规的内容,一经查实,本站将立刻删除。
发表评论