= "$ {aws_iam_role.eks_vuejs_phalcon.arn}"
vpc_config {
subnet_ids = ["$ {aws_subnet.eks_vuejs_phalcon.id}", "$ {aws_subnet.example2.id}"]
}
}
output "endpoint" {
value = "$ {aws_eks_cluster.eks_vuejs_phalcon.endpoint}"
}
output "kubeconfig-certificate-authority-data" {
value = "$ {aws_eks_cluster.eks_vuejs_phalcon.certificate_authority.0.data}"
}
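Once the cluster exists, these values can be read back from the state, for example to assemble a kubeconfig by hand:
$ terraform output endpoint
$ terraform output kubeconfig-certificate-authority-data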
provider "google" {
credentials = "$ {file (" account.json ")}"
project = "my-project-id"
region = "us-central1"
}
resource "google_container_cluster" "primary" {
name = "my-gke-cluster"
location = "us-central1"
remove_default_node_pool = true
initial_node_count = 1
master_auth {
username = ""
password = ""
}
}
output "client_certificate" {
value = "$ {google_container_cluster.primary.master_auth.0.client_certificate}"
}
output "client_key" {
value = "$ {google_container_cluster.primary.master_auth.0.client_key}"
}
output "cluster_ca_certificate" {
value = "$ {google_container_cluster.primary.master_auth.0.cluster_ca_certificate}"
}
$ cat deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: phalcon-vuejs
  namespace: development
spec:
  selector:
    matchLabels:
      app: vuejs
  replicas: 1
  template:
    metadata:
      labels:
        app: vuejs
    spec:
      initContainers:
      - name: vuejs-build
        image: vuejs/ci
        volumeMounts:
        - name: app
          mountPath: /app/public
        command:
        - /bin/bash
        - -c
        - |
          cd /app/public
          git clone essch/vuejs_phalcon:1.0
          npm test
          npm run build
      containers:
      - name: healthcheck
        image: mileschou/phalcon:7.2-cli
        args:
        - /bin/sh
        - -c
        - cd /usr/src/app && git clone essch/app_phalcon:1.0 && touch /tmp/healthy && sleep 10 && php script.php
        readinessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 5
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 15
          periodSeconds: 5
      volumes:
      - name: app
        emptyDir: {}
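The manifest can then be applied in the usual way, assuming kubectl is already configured for the cluster and the development namespace exists (create it first if not):
$ kubectl create namespace development
$ kubectl apply -f deployment.yml
$ kubectl -n development get pods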
So we have created an AWS EC2 instance. We did not specify any access keys because the AWS CLI on this machine is already authorized, and Terraform will reuse that authorization. For code reuse, Terraform also supports variables, data sources, and modules, as sketched below.
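Here is a minimal sketch of all three, under assumed names: a region variable, an AMI lookup through a data source (the owner ID is Canonical's), and a reusable module expected to live in ./modules/network:
variable "region" {
  default = "us-west-2" # assumed default region
}
data "aws_ami" "ubuntu" {
  most_recent = true
  filter {
    name = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"]
  }
  owners = ["099720109477"] # Canonical
}
module "network" {
  source = "./modules/network" # hypothetical local module
  cidr_block = "10.0.0.0/16"
}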
Let's create a separate network:
resource "aws_vpc" "my_vpc" {
cidr_block = "190.160.0.0/16"
instance_target = "default"
}
resource "aws_subnet" "my_subnet" {
vpc_id = "$ {aws_vpc.my_vpc.id}"
cidr_block = "190.160.1.0/24"
}
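To actually place a machine in this network, an instance can reference the subnet. A minimal sketch; the AMI ID is a placeholder and must be replaced with one valid in your region:
resource "aws_instance" "my_instance" {
  ami = "ami-00000000" # placeholder AMI ID
  instance_type = "t2.micro"
  subnet_id = "${aws_subnet.my_subnet.id}"
}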
$ cat gce/provider.tf
provider "google" {
  credentials = "${file("account.json")}"
  project = "my-project-id"
  region = "us-central1"
}
resource "google_compute_instance" "default" {
  name = "test"
  machine_type = "n1-standard-1"
  zone = "us-central1-a"
  # boot_disk and network_interface are required for this resource
  boot_disk {
    initialize_params { image = "debian-cloud/debian-9" }
  }
  network_interface { network = "default" }
}
$ cd gce
$ terraform init
$ terraform apply
$ cd ..
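As with the clusters, attributes of the machine just created can be exported through outputs, for example its self_link:
output "instance_self_link" {
  value = "${google_compute_instance.default.self_link}"
}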
For distributed work, let's store the state of the infrastructure in AWS S3 (other data can be kept there as well), and, for safety, in a different region:
terraform {
  backend "s3" {
    bucket = "tfstate"
    key = "terraform.tfstate"
    region = "us-east-2"
  }
}
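When several people apply changes against the same remote state, it is also worth enabling state locking; the s3 backend supports this through a DynamoDB table (the table name here is an assumption, and the table must have a LockID primary key):
terraform {
  backend "s3" {
    bucket = "tfstate"
    key = "terraform.tfstate"
    region = "us-east-2"
    dynamodb_table = "tfstate-lock" # hypothetical lock table
  }
}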
provider "kubernetes" {
host = "https://104.196.242.174"
username = "ClusterMaster"
password = "MindTheGap"
}
resource "kubernetes_pod" "my_pod" {
spec {
container {
image = "Nginx: 1.7.9"
name = "Nginx"
port {
container_port = 80
}
}
}
}
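After applying, the pod can be checked with the usual tooling, assuming kubectl is pointed at the same cluster:
$ terraform apply
$ kubectl get pods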
Commands:
terraform init # download dependencies according to the configs and check them
terraform validate # check the syntax
terraform plan # see in detail how the infrastructure will be changed and why exactly so, for example, whether only the service's meta information will be changed or the service itself will be re-created, which is often unacceptable for databases
terraform apply # apply the changes
The core is the part that is common to all providers.