# k3s/tests/perf/server/main.tf

terraform {
  backend "local" {
    path = "server.tfstate"
  }
}

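# State is kept locally next to this module rather than in a remote backend.
# A typical run (sketch; the -var values shown are hypothetical examples, not
# taken from this repo):
#   terraform init
#   terraform apply -var="name=k3s-perf" -var="server_count=3"
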
locals {
  name                      = var.name
  k3s_cluster_secret        = var.k3s_cluster_secret
  install_k3s_version       = var.k3s_version
  prom_worker_node_count    = var.prom_worker_node_count
  prom_worker_instance_type = var.prom_worker_instance_type
}

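# The locals above simply alias input variables. Those variables are assumed
# to be declared in a sibling variables.tf, roughly like this (sketch only;
# the real declarations, types, and defaults live outside this file):
#   variable "name" {}
#   variable "k3s_cluster_secret" {}
#   variable "k3s_version" {}
#   variable "prom_worker_node_count" {}
#   variable "prom_worker_instance_type" {}
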
provider "aws" {
2019-11-15 11:14:28 +00:00
region = "us-east-2"
2019-10-31 21:06:54 +00:00
profile = "rancher-eng"
}
resource "aws_security_group" "k3s" {
name = "${local.name}-sg"
vpc_id = data.aws_vpc.default.id
ingress {
from_port = 22
to_port = 22
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 6443
to_port = 6443
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
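# Note that the third ingress rule already opens every port to the world, so
# the SSH and 6443 rules above are effectively subsets of it; that is fine for
# a short-lived perf rig. A locked-down variant would scope the sources, e.g.
# (sketch; "admin_cidr" is a hypothetical variable, not defined in this repo):
#   ingress {
#     from_port   = 22
#     to_port     = 22
#     protocol    = "TCP"
#     cidr_blocks = [var.admin_cidr]
#   }
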
resource "aws_db_instance" "k3s_db" {
2019-11-15 11:14:28 +00:00
count = "${var.db_engine == "postgres" || var.db_engine == "mysql" ? 1 : 0 }"
2019-10-31 21:06:54 +00:00
allocated_storage = 100 #baseline iops is 300 with gp2
2019-11-15 11:14:28 +00:00
storage_type = "gp2"
2019-11-04 16:09:54 +00:00
engine = "${var.db_engine}"
engine_version = "${var.db_version}"
2019-10-31 21:06:54 +00:00
instance_class = "${var.db_instance_type}"
name = "${var.db_name}"
username = "${var.db_username}"
password = "${var.db_password}"
skip_final_snapshot = true
multi_az = false
}
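# The RDS endpoint above is handed to the servers as db_address below, where
# k3s uses it as an external datastore endpoint. For mysql that endpoint
# conventionally looks like (illustrative only; the exact string is assembled
# in files/server_userdata.tmpl, which is not shown here):
#   mysql://<db_username>:<db_password>@tcp(<address>:3306)/<db_name>
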
resource "aws_instance" "k3s_etcd" {
  # Only create etcd nodes when the datastore is etcd and HA is enabled.
  count = var.etcd_count * (var.db_engine == "etcd" ? 1 * var.server_ha : 0)

  # Reuse the RDS sizing variable by stripping the "db." prefix,
  # e.g. "db.m5.large" becomes "m5.large".
  instance_type = replace(var.db_instance_type, "/db./", "")
  ami           = data.aws_ami.ubuntu.id
  user_data = base64encode(templatefile("${path.module}/files/etcd.tmpl",
    {
      extra_ssh_keys = var.extra_ssh_keys,
      db_version     = var.db_version,
      etcd_count     = var.etcd_count,
  }))

  security_groups = [
    aws_security_group.k3s.name,
  ]

  root_block_device {
    volume_size = "30"
    volume_type = "gp2"
  }

  tags = {
    Name = "${local.name}-etcd-${count.index}"
  }
}

resource "aws_lb" "k3s-master-nlb" {
  name               = "${local.name}-nlb"
  internal           = false
  load_balancer_type = "network"
  subnets            = data.aws_subnet_ids.available.ids
}

resource "aws_route53_record" "www" {
# currently there is the only way to use nlb dns name in k3s
# because the real dns name is too long and cause an issue
zone_id = "${var.zone_id}"
name = "${var.domain_name}"
type = "CNAME"
ttl = "30"
records = ["${aws_lb.k3s-master-nlb.dns_name}"]
2019-10-31 21:06:54 +00:00
}
2019-11-04 16:09:54 +00:00
2019-10-31 21:06:54 +00:00
resource "aws_lb_target_group" "k3s-master-nlb-tg" {
name = "${local.name}-nlb-tg"
port = "6443"
protocol = "TCP"
vpc_id = data.aws_vpc.default.id
deregistration_delay = "300"
health_check {
interval = "30"
port = "6443"
protocol = "TCP"
healthy_threshold = "10"
unhealthy_threshold= "10"
}
}
resource "aws_lb_listener" "k3s-master-nlb-tg" {
load_balancer_arn = "${aws_lb.k3s-master-nlb.arn}"
port = "6443"
protocol = "TCP"
default_action {
target_group_arn = "${aws_lb_target_group.k3s-master-nlb-tg.arn}"
type = "forward"
}
}
resource "aws_lb_target_group_attachment" "test" {
2019-11-04 16:09:54 +00:00
count = "${var.server_count}"
2019-10-31 21:06:54 +00:00
target_group_arn = "${aws_lb_target_group.k3s-master-nlb-tg.arn}"
2019-11-04 16:09:54 +00:00
target_id = "${aws_instance.k3s-server[count.index].id}"
2019-10-31 21:06:54 +00:00
port = 6443
}
resource "aws_instance" "k3s-server" {
  count         = var.server_count
  instance_type = var.server_instance_type
  ami           = data.aws_ami.ubuntu.id
  user_data = base64encode(templatefile("${path.module}/files/server_userdata.tmpl",
    {
      extra_ssh_keys         = var.extra_ssh_keys,
      k3s_cluster_secret     = local.k3s_cluster_secret,
      install_k3s_version    = local.install_k3s_version,
      k3s_server_args        = var.k3s_server_args,
      db_engine              = var.db_engine,
      db_address             = var.db_engine == "etcd" ? join(",", aws_instance.k3s_etcd.*.private_ip) : (var.db_engine == "embedded-etcd" ? "null" : aws_db_instance.k3s_db[0].address),
      db_name                = var.db_name,
      db_username            = var.db_username,
      db_password            = var.db_password,
      use_ha                 = var.server_ha == 1 ? "true" : "false",
      master_index           = count.index,
      lb_address             = var.domain_name,
      prom_worker_node_count = local.prom_worker_node_count,
      debug                  = var.debug,
  }))

  security_groups = [
    aws_security_group.k3s.name,
  ]

  root_block_device {
    volume_size = "30"
    volume_type = "gp2"
  }

  tags = {
    Name   = "${local.name}-server-${count.index}"
    Role   = "master"
    Leader = count.index == 0 ? "true" : "false"
  }

  # Give the instance a moment to boot before dependents run.
  provisioner "local-exec" {
    command = "sleep 10"
  }
}

module "k3s-prom-worker-asg" {
source = "terraform-aws-modules/autoscaling/aws"
version = "3.0.0"
name = "${local.name}-prom-worker"
asg_name = "${local.name}-prom-worker"
2019-11-04 16:09:54 +00:00
instance_type = local.prom_worker_instance_type
2019-10-31 21:06:54 +00:00
image_id = data.aws_ami.ubuntu.id
2019-11-04 16:09:54 +00:00
user_data = base64encode(templatefile("${path.module}/files/worker_userdata.tmpl", { extra_ssh_keys = var.extra_ssh_keys, k3s_url = var.domain_name, k3s_cluster_secret = local.k3s_cluster_secret, install_k3s_version = local.install_k3s_version, k3s_exec = "--node-label prom=true" }))
2019-10-31 21:06:54 +00:00
desired_capacity = local.prom_worker_node_count
health_check_type = "EC2"
max_size = local.prom_worker_node_count
min_size = local.prom_worker_node_count
vpc_zone_identifier = [data.aws_subnet.selected.id]
spot_price = "0.340"
security_groups = [
aws_security_group.k3s.id,
]
lc_name = "${local.name}-prom-worker"
root_block_device = [
{
2019-11-15 11:14:28 +00:00
volume_size = "30"
2019-10-31 21:06:54 +00:00
volume_type = "gp2"
},
]
}
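# The prometheus workers join through the Route53 name fronting the NLB. The
# worker user-data presumably drives the standard k3s install script, along
# the lines of (sketch, not the template's literal contents):
#   curl -sfL https://get.k3s.io | \
#     INSTALL_K3S_VERSION=<k3s_version> \
#     K3S_URL=https://<domain_name>:6443 \
#     K3S_CLUSTER_SECRET=<k3s_cluster_secret> \
#     INSTALL_K3S_EXEC="--node-label prom=true" sh -
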
resource "null_resource" "run_etcd" {
  count = var.db_engine == "etcd" ? 1 : 0

  # Re-run the bootstrap script whenever the set of etcd instances changes.
  triggers = {
    etcd_instance_ids = join(",", aws_instance.k3s_etcd.*.id)
  }

  provisioner "local-exec" {
    interpreter = ["bash", "-c"]
    command     = "DB_VERSION=${var.db_version} SSH_KEY_PATH=${var.ssh_key_path} PUBLIC_IPS=${join(",", aws_instance.k3s_etcd.*.public_ip)} PRIVATE_IPS=${join(",", aws_instance.k3s_etcd.*.private_ip)} files/etcd_build.sh"
  }
}

resource "null_resource" "get-kubeconfig" {
  provisioner "local-exec" {
    interpreter = ["bash", "-c"]
    command     = "until ssh -i ${var.ssh_key_path} ubuntu@${aws_instance.k3s-server[0].public_ip} 'sudo sed \"s/localhost/${var.domain_name}/g;s/127.0.0.1/${var.domain_name}/g\" /etc/rancher/k3s/k3s.yaml' >| ../tests/kubeconfig.yaml; do sleep 5; done"
  }
}
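
# Once the loop above succeeds, the fetched kubeconfig already points at the
# Route53 name, so it can be used directly (usage sketch):
#   KUBECONFIG=../tests/kubeconfig.yaml kubectl get nodes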