Add istio with cert manager and fix iptables

# Cert Manager
## Chart
```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace -f values.yaml
```
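Once the release is installed, the controller, webhook, and cainjector pods should all become Ready before any issuer is created. A quick sanity check, using the release name and namespace from the command above:

```bash
# All three cert-manager deployments should report Ready
kubectl get pods -n cert-manager
# The CRDs are installed by the chart because installCRDs is true in values.yaml
kubectl get crds | grep cert-manager.io
```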
## Set up the Let's Encrypt issuer
```bash
kubectl create -f issuer.yaml
```
If creating the issuer fails with `context deadline exceeded`, the cert-manager webhook is usually not ready yet; delete the resource and create it again once the webhook pod is Ready.
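A minimal recovery sketch, assuming the `letsencrypt-prod` ClusterIssuer from `issuer.yaml` below:

```bash
# Recreate the issuer once the cert-manager webhook pod is Ready
kubectl delete -f issuer.yaml
kubectl create -f issuer.yaml
# The issuer should eventually report READY=True
kubectl get clusterissuer letsencrypt-prod
```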
## Upgrade
Review the changelog, and if everything looks fine, update the `values.yaml` file and then run a `helm upgrade`.
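One possible upgrade sequence, sketched with the release name and namespace used above (diffing against the new chart defaults is optional but useful):

```bash
helm repo update
# Compare the pinned values.yaml with the new chart defaults before upgrading
helm show values jetstack/cert-manager > /tmp/cert-manager-defaults.yaml
diff values.yaml /tmp/cert-manager-defaults.yaml || true
helm upgrade cert-manager jetstack/cert-manager --namespace cert-manager -f values.yaml
```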
## Test it
There is an example deployment in the `example` folder.
```bash
kubectl create -f example/
```
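A quick way to check that the example resources came up (names taken from the manifests below):

```bash
# Pod created by the example Deployment
kubectl get pods -n default -l app=example-nginx
# ClusterIP Service in front of it
kubectl get svc example-service -n default
```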
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-nginx
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-nginx
  replicas: 1
  template:
    metadata:
      labels:
        app: example-nginx
    spec:
      containers:
        - name: example-nginx
          image: nginx:1.21.3
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "64Mi"
              cpu: "30m"
            limits:
              memory: "128Mi"
              cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: example-service
  namespace: default
spec:
  selector:
    app: example-nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
# apiVersion: networking.k8s.io/v1
# kind: Ingress
# metadata:
#   name: test-ingress
#   namespace: default
#   annotations:
#     kubernetes.io/ingress.class: nginx
#     kubernetes.io/tls-acme: "true"
#     cert-manager.io/cluster-issuer: "letsencrypt-prod"
# spec:
#   rules:
#     - host: test.k8s-01.sch.bme.hu
#       http:
#         paths:
#           - backend:
#               service:
#                 name: example-service
#                 port:
#                   number: 80
#             pathType: Prefix
#             path: /
#   tls:
#     - secretName: test-cert
#       hosts:
#         - test.k8s-01.sch.bme.hu
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: korte-gw-cert
  namespace: istio-system
spec:
  secretName: korte-gw-cert
  commonName: korte.maze.sch.bme.hu
  dnsNames:
    - korte.maze.sch.bme.hu
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: korte-gateway
spec:
  selector:
    istio: ingressgateway # use Istio default gateway implementation
  servers:
    - port:
        number: 443
        name: https
        protocol: HTTPS
      tls:
        mode: SIMPLE
        credentialName: korte-gw-cert
      hosts:
        - "korte.maze.sch.bme.hu"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: korte
spec:
  hosts:
    - "korte.maze.sch.bme.hu"
  gateways:
    - korte-gateway
  http:
    - match:
        - uri:
            prefix: /
      route:
        - destination:
            port:
              number: 80
            host: example-service.default.svc.cluster.local
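Once cert-manager has solved the challenge, the issued key pair lands in the `korte-gw-cert` secret in `istio-system`, which the Gateway picks up via `credentialName`. A rough end-to-end check (sketch):

```bash
# The Certificate should report READY=True and the secret should exist
kubectl get certificate korte-gw-cert -n istio-system
kubectl get secret korte-gw-cert -n istio-system
# The gateway should now serve the Let's Encrypt certificate for this host
curl -vI https://korte.maze.sch.bme.hu 2>&1 | grep -iE 'subject|issuer|HTTP/'
```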
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  namespace: cert-manager
spec:
  acme:
    email: rlacko99@gmail.com
    privateKeySecretRef:
      name: letsencrypt-account-key
    server: 'https://acme-v02.api.letsencrypt.org/directory'
    solvers:
      - http01:
          ingress:
            class: istio
            serviceType: ClusterIP
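With `class: istio`, cert-manager solves HTTP-01 challenges by creating a temporary solver pod, Service, and Ingress of the `istio` class in the namespace of the Certificate (the matching IngressClass is added further down). A sketch for inspecting a stuck challenge:

```bash
# Orders and Challenges are created per dnsName while a certificate is being issued
kubectl get orders,challenges --all-namespaces
kubectl describe challenge -n istio-system
# The temporary solver Ingress must be reachable over plain HTTP on the requested host
kubectl get ingress -n istio-system
```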
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"
  # Optional priority class to be used for the cert-manager pods
  priorityClassName: ""
  rbac:
    create: true
  podSecurityPolicy:
    enabled: false
    useAppArmor: true
  # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
  logLevel: 2
  leaderElection:
    # Override the namespace used to store the ConfigMap for leader election
    namespace: "kube-system"
    # The duration that non-leader candidates will wait after observing a
    # leadership renewal until attempting to acquire leadership of a led but
    # unrenewed leader slot. This is effectively the maximum duration that a
    # leader can be stopped before it is replaced by another candidate.
    # leaseDuration: 60s
    # The interval between attempts by the acting master to renew a leadership
    # slot before it stops leading. This must be less than or equal to the
    # lease duration.
    # renewDeadline: 40s
    # The duration the clients should wait between attempting acquisition and
    # renewal of a leadership.
    # retryPeriod: 15s
installCRDs: true
replicaCount: 1
strategy: {}
  # type: RollingUpdate
  # rollingUpdate:
  #   maxSurge: 0
  #   maxUnavailable: 1
# Comma separated list of feature gates that should be enabled on the
# controller pod.
featureGates: ""
image:
  repository: quay.io/jetstack/cert-manager-controller
  # You can manage a registry with
  # registry: quay.io
  # repository: jetstack/cert-manager-controller
  # Override the image tag to deploy by setting this variable.
  # If no value is set, the chart's appVersion will be used.
  # tag: canary
  # Setting a digest will override any tag
  # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
  pullPolicy: IfNotPresent
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  # name: ""
  # Optional additional annotations to add to the controller's ServiceAccount
  # annotations: {}
  # Automount API credentials for a Service Account.
  automountServiceAccountToken: true
# Optional additional arguments
extraArgs: []
  # Use this flag to set a namespace that cert-manager will use to store
  # supporting resources required for each ClusterIssuer (default is kube-system)
  # - --cluster-resource-namespace=kube-system
  # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
  # - --enable-certificate-owner-ref=true
  # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
  # - --controllers=*,-certificaterequests-approver
extraEnv: []
# - name: SOME_VAR
#   value: 'some value'
resources:
  requests:
    cpu: 30m
    memory: 32Mi
  limits:
    cpu: 60m
    memory: 64Mi
# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
  runAsNonRoot: true
# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
# securityContext:
#   enabled: false
#   fsGroup: 1001
#   runAsUser: 1001
# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
# you want to set, e.g.
# securityContext:
#   fsGroup: 1000
#   runAsUser: 1000
#   runAsNonRoot: true
# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
volumes: []
volumeMounts: []
# Optional additional annotations to add to the controller Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the controller Pods
# podAnnotations: {}
podLabels: {}
# Optional additional labels to add to the controller Service
# serviceLabels: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
#   nameservers:
#     - "1.1.1.1"
#     - "8.8.8.8"
nodeSelector: {}
ingressShim: {}
  # defaultIssuerName: ""
  # defaultIssuerKind: ""
  # defaultIssuerGroup: ""
prometheus:
  enabled: true
  servicemonitor:
    enabled: false
    prometheusInstance: default
    targetPort: 9402
    path: /metrics
    interval: 60s
    scrapeTimeout: 30s
    labels: {}
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#         - matchExpressions:
#           - key: foo.bar.com/role
#             operator: In
#             values:
#             - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
#   tolerations:
#   - key: foo.bar.com/role
#     operator: Equal
#     value: master
#     effect: NoSchedule
tolerations: []
webhook:
  replicaCount: 1
  timeoutSeconds: 10
  strategy: {}
    # type: RollingUpdate
    # rollingUpdate:
    #   maxSurge: 0
    #   maxUnavailable: 1
  # Pod Security Context to be set on the webhook component Pod
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  securityContext:
    runAsNonRoot: true
  # Container Security Context to be set on the webhook component container
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  containerSecurityContext: {}
    # capabilities:
    #   drop:
    #   - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
  # Optional additional annotations to add to the webhook Deployment
  # deploymentAnnotations: {}
  # Optional additional annotations to add to the webhook Pods
  # podAnnotations: {}
  # Optional additional annotations to add to the webhook MutatingWebhookConfiguration
  # mutatingWebhookConfigurationAnnotations: {}
  # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
  # validatingWebhookConfigurationAnnotations: {}
  # Optional additional arguments for webhook
  extraArgs: []
  resources:
    requests:
      cpu: 30m
      memory: 32Mi
    limits:
      cpu: 60m
      memory: 64Mi
  ## Liveness and readiness probe values
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
  ##
  livenessProbe:
    failureThreshold: 3
    initialDelaySeconds: 60
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  readinessProbe:
    failureThreshold: 3
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 1
  nodeSelector: {}
  affinity: {}
  tolerations: []
  # Optional additional labels to add to the Webhook Pods
  podLabels: {}
  # Optional additional labels to add to the Webhook Service
  serviceLabels: {}
  image:
    repository: quay.io/jetstack/cert-manager-webhook
    # You can manage a registry with
    # registry: quay.io
    # repository: jetstack/cert-manager-webhook
    # Override the image tag to deploy by setting this variable.
    # If no value is set, the chart's appVersion will be used.
    # tag: canary
    # Setting a digest will override any tag
    # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
    pullPolicy: IfNotPresent
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    # name: ""
    # Optional additional annotations to add to the controller's ServiceAccount
    # annotations: {}
    # Automount API credentials for a Service Account.
    automountServiceAccountToken: true
  # The port that the webhook should listen on for requests.
  # In GKE private clusters, by default kubernetes apiservers are allowed to
  # talk to the cluster nodes only on 443 and 10250, so configuring
  # securePort: 10250 will work out of the box without needing to add firewall
  # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
  securePort: 10250
  # Specifies if the webhook should be started in hostNetwork mode.
  #
  # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
  # CNI (such as calico), because the control plane managed by AWS cannot communicate
  # with the pods' IP CIDR, so admission webhooks do not work
  #
  # Since the default port for the webhook conflicts with kubelet on the host
  # network, `webhook.securePort` should be changed to an available port if
  # running in hostNetwork mode.
  hostNetwork: false
  # Specifies how the service should be handled. Useful if you want to expose the
  # webhook to outside of the cluster. In some cases, the control plane cannot
  # reach internal services.
  serviceType: ClusterIP
  # loadBalancerIP:
  # Overrides the mutating webhook and validating webhook so they reach the webhook
  # service using the `url` field instead of a service.
  url: {}
    # host:
cainjector:
  enabled: true
  replicaCount: 1
  strategy: {}
    # type: RollingUpdate
    # rollingUpdate:
    #   maxSurge: 0
    #   maxUnavailable: 1
  # Pod Security Context to be set on the cainjector component Pod
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  securityContext:
    runAsNonRoot: true
  # Container Security Context to be set on the cainjector component container
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  containerSecurityContext: {}
    # capabilities:
    #   drop:
    #   - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
  # Optional additional annotations to add to the cainjector Deployment
  # deploymentAnnotations: {}
  # Optional additional annotations to add to the cainjector Pods
  # podAnnotations: {}
  # Optional additional arguments for cainjector
  extraArgs: []
  resources: {}
    # requests:
    #   cpu: 10m
    #   memory: 32Mi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  # Optional additional labels to add to the CA Injector Pods
  podLabels: {}
  image:
    repository: quay.io/jetstack/cert-manager-cainjector
    # You can manage a registry with
    # registry: quay.io
    # repository: jetstack/cert-manager-cainjector
    # Override the image tag to deploy by setting this variable.
    # If no value is set, the chart's appVersion will be used.
    # tag: canary
    # Setting a digest will override any tag
    # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
    pullPolicy: IfNotPresent
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    # name: ""
    # Optional additional annotations to add to the controller's ServiceAccount
    # annotations: {}
    # Automount API credentials for a Service Account.
    automountServiceAccountToken: true
# This startupapicheck is a Helm post-install hook that waits for the webhook
# endpoints to become available.
startupapicheck:
  enabled: true
  # Pod Security Context to be set on the startupapicheck component Pod
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  securityContext:
    runAsNonRoot: true
  # Timeout for 'kubectl check api' command
  timeout: 1m
  # Job backoffLimit
  backoffLimit: 4
  # Optional additional annotations to add to the startupapicheck Job
  jobAnnotations:
    helm.sh/hook: post-install
    helm.sh/hook-weight: "1"
    helm.sh/hook-delete-policy: hook-succeeded
  # Optional additional annotations to add to the startupapicheck Pods
  # podAnnotations: {}
  # Optional additional arguments for startupapicheck
  extraArgs: []
  resources: {}
    # requests:
    #   cpu: 10m
    #   memory: 32Mi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  # Optional additional labels to add to the startupapicheck Pods
  podLabels: {}
  image:
    repository: quay.io/jetstack/cert-manager-ctl
    # You can manage a registry with
    # registry: quay.io
    # repository: jetstack/cert-manager-ctl
    # Override the image tag to deploy by setting this variable.
    # If no value is set, the chart's appVersion will be used.
    # tag: canary
    # Setting a digest will override any tag
    # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
    pullPolicy: IfNotPresent
  rbac:
    annotations:
      helm.sh/hook: post-install
      helm.sh/hook-weight: "-5"
      helm.sh/hook-delete-policy: hook-succeeded
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    # name: ""
    # Optional additional annotations to add to the Job's ServiceAccount
    annotations:
      helm.sh/hook: post-install
      helm.sh/hook-weight: "-5"
      helm.sh/hook-delete-policy: hook-succeeded
    # Automount API credentials for a Service Account.
    automountServiceAccountToken: true
@@ -74,3 +74,10 @@ spec:
operator: In
values:
- s390x
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: istio
spec:
  controller: istio.io/ingress-controller
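This `istio` IngressClass is what the `class: istio` field of the ClusterIssuer's HTTP-01 solver resolves to; on current Kubernetes versions the solver Ingress is ignored without a matching IngressClass. A quick check that it exists (sketch):

```bash
kubectl get ingressclass istio -o yaml
```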
@@ -9,6 +9,9 @@
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
-A PREROUTING -d 152.66.211.122 -p tcp --dport 80 -j DNAT --to-destination 192.168.44.1:80
-A PREROUTING -d 152.66.211.122 -p tcp --dport 443 -j DNAT --to-destination 192.168.44.1:443
-A POSTROUTING -o wan0 -j MASQUERADE
COMMIT
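The two DNAT rules forward ports 80 and 443 from the public address 152.66.211.122 to 192.168.44.1 on the internal network (presumably where the Istio ingress gateway is reachable); port 80 matters because the ACME HTTP-01 challenge arrives over plain HTTP. A sketch for loading and verifying the rules, assuming the file is in iptables-restore format (the actual path on the router is not shown in this commit):

```bash
# Load the updated NAT rules (path is an assumption; use wherever this rules file lives)
sudo iptables-restore < /etc/iptables/rules.v4
# Verify the DNAT and MASQUERADE entries in the nat table
sudo iptables -t nat -L PREROUTING -n --line-numbers
sudo iptables -t nat -L POSTROUTING -n
```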