# argocd/cilium/templates/cilium-operator/deployment.yaml
{{- if .Values.operator.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: {{ .Release.Namespace }}
  {{- with .Values.operator.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    io.cilium/app: operator
    name: cilium-operator
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: {{ .Values.operator.replicas }}
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  # Ensure the operator can be updated on single-node k8s clusters by using a
  # rolling update with maxUnavailable=100% when there is a single replica and the
  # user has not configured a Recreate strategy. Otherwise an update might get
  # stuck due to the default maxUnavailable=50% combined with the podAntiAffinity
  # rule, which prevents multiple operator replicas from being scheduled on the
  # same node.
  {{- if and (eq (.Values.operator.replicas | toString) "1") (eq .Values.operator.updateStrategy.type "RollingUpdate") }}
  strategy:
    rollingUpdate:
      maxSurge: {{ .Values.operator.updateStrategy.rollingUpdate.maxSurge }}
      maxUnavailable: 100%
    type: RollingUpdate
  {{- else }}
  {{- with .Values.operator.updateStrategy }}
  strategy:
    {{- toYaml . | trim | nindent 4 }}
  {{- end }}
  {{- end }}
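  # Illustrative only: with operator.replicas=1 and a RollingUpdate strategy, the
  # branch above renders roughly as
  #   strategy:
  #     rollingUpdate:
  #       maxSurge: 25%          # assumed chart default; check values.yaml
  #       maxUnavailable: 100%
  #     type: RollingUpdate
  # so the single replica can be torn down and replaced despite podAntiAffinity.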
  template:
    metadata:
      annotations:
        {{- if .Values.operator.rollOutPods }}
        # ensure pods roll when configmap updates
        cilium.io/cilium-configmap-checksum: {{ include (print $.Template.BasePath "/cilium-configmap.yaml") . | sha256sum | quote }}
        {{- end }}
        {{- if and .Values.operator.prometheus.enabled (not .Values.operator.prometheus.serviceMonitor.enabled) }}
        prometheus.io/port: {{ .Values.operator.prometheus.port | quote }}
        prometheus.io/scrape: "true"
        {{- end }}
        {{- with .Values.operator.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        io.cilium/app: operator
        name: cilium-operator
        app.kubernetes.io/part-of: cilium
        app.kubernetes.io/name: cilium-operator
        {{- with .Values.operator.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.operator.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
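      # Note: .Values.operator.podSecurityContext applies to the whole pod; the
      # container-level securityContext set further below can override it for the
      # cilium-operator container.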
      containers:
      - name: cilium-operator
        image: {{ include "cilium.operator.image" . | quote }}
        imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
        command:
        - cilium-operator-{{ include "cilium.operator.cloud" . }}
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        {{- with .Values.operator.extraArgs }}
        {{- toYaml . | trim | nindent 8 }}
        {{- end }}
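        # operator.extraArgs entries are appended verbatim to the argument list.
        # A minimal values.yaml sketch (the flag shown is illustrative only):
        #   operator:
        #     extraArgs:
        #     - --identity-gc-interval=15m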
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        {{- if .Values.clustermesh.enableEndpointSliceSynchronization }}
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        {{- end }}
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        {{- if and .Values.eni.enabled (not .Values.eni.iamRole) }}
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: cilium-aws
              key: AWS_ACCESS_KEY_ID
              optional: true
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: cilium-aws
              key: AWS_SECRET_ACCESS_KEY
              optional: true
        - name: AWS_DEFAULT_REGION
          valueFrom:
            secretKeyRef:
              name: cilium-aws
              key: AWS_DEFAULT_REGION
              optional: true
        {{- end }}
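        # The AWS credential env vars above are rendered only when ENI IPAM is
        # enabled and no eni.iamRole is set; the cilium-aws secret keys are all
        # optional, so the operator can presumably fall back to other standard AWS
        # credential sources (e.g. an instance profile) when the secret is absent.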
        {{- if .Values.alibabacloud.enabled }}
        - name: ALIBABA_CLOUD_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: cilium-alibabacloud
              key: ALIBABA_CLOUD_ACCESS_KEY_ID
              optional: true
        - name: ALIBABA_CLOUD_ACCESS_KEY_SECRET
          valueFrom:
            secretKeyRef:
              name: cilium-alibabacloud
              key: ALIBABA_CLOUD_ACCESS_KEY_SECRET
              optional: true
        {{- end }}
        {{- if .Values.k8sServiceHost }}
        - name: KUBERNETES_SERVICE_HOST
          value: {{ include "k8sServiceHost" . }}
        - name: KUBERNETES_SERVICE_PORT
          value: {{ include "k8sServicePort" . }}
        {{- end }}
        {{- if .Values.azure.enabled }}
        {{- if .Values.azure.subscriptionID }}
        - name: AZURE_SUBSCRIPTION_ID
          value: {{ .Values.azure.subscriptionID }}
        {{- end }}
        {{- if .Values.azure.tenantID }}
        - name: AZURE_TENANT_ID
          value: {{ .Values.azure.tenantID }}
        {{- end }}
        {{- if .Values.azure.resourceGroup }}
        - name: AZURE_RESOURCE_GROUP
          value: {{ .Values.azure.resourceGroup }}
        {{- end }}
        - name: AZURE_CLIENT_ID
          valueFrom:
            secretKeyRef:
              name: cilium-azure
              key: AZURE_CLIENT_ID
        - name: AZURE_CLIENT_SECRET
          valueFrom:
            secretKeyRef:
              name: cilium-azure
              key: AZURE_CLIENT_SECRET
        {{- end }}
        {{- with .Values.operator.extraEnv }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
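        # operator.extraEnv takes a plain list of EnvVar objects appended to the
        # container env, e.g. (illustrative):
        #   operator:
        #     extraEnv:
        #     - name: GODEBUG
        #       value: "gctrace=1"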
        {{- if .Values.operator.prometheus.enabled }}
        ports:
        - name: prometheus
          containerPort: {{ .Values.operator.prometheus.port }}
          {{- if .Values.operator.hostNetwork }}
          hostPort: {{ .Values.operator.prometheus.port }}
          {{- end }}
          protocol: TCP
        {{- end }}
        livenessProbe:
          httpGet:
            {{- if .Values.operator.hostNetwork }}
            host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
            {{- end }}
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        readinessProbe:
          httpGet:
            {{- if .Values.operator.hostNetwork }}
            host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
            {{- end }}
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 5
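        # With hostNetwork enabled the probes explicitly target loopback
        # (127.0.0.1 when IPv4 is enabled, ::1 otherwise) rather than letting the
        # kubelet default to the pod IP, which in host-network mode is the node IP.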
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        {{- if .Values.etcd.enabled }}
        - name: etcd-config-path
          mountPath: /var/lib/etcd-config
          readOnly: true
        {{- if .Values.etcd.ssl }}
        - name: etcd-secrets
          mountPath: /var/lib/etcd-secrets
          readOnly: true
        {{- end }}
        {{- end }}
        {{- if .Values.clustermesh.enableEndpointSliceSynchronization }}
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        {{- end }}
        {{- if .Values.kubeConfigPath }}
        - name: kube-config
          mountPath: {{ .Values.kubeConfigPath }}
          readOnly: true
        {{- end }}
        {{- if .Values.authentication.mutual.spire.enabled }}
        - name: spire-agent-socket
          mountPath: {{ dir .Values.authentication.mutual.spire.agentSocketPath }}
          readOnly: true
        {{- end }}
        {{- range .Values.operator.extraHostPathMounts }}
        - name: {{ .name }}
          mountPath: {{ .mountPath }}
          readOnly: {{ .readOnly }}
          {{- if .mountPropagation }}
          mountPropagation: {{ .mountPropagation }}
          {{- end }}
        {{- end }}
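        # Each operator.extraHostPathMounts entry feeds both this mount and the
        # matching hostPath volume below, e.g. (names and paths illustrative):
        #   operator:
        #     extraHostPathMounts:
        #     - name: host-mnt-data
        #       mountPath: /host/mnt/data
        #       hostPath: /mnt/data
        #       readOnly: true
        #       # mountPropagation: HostToContainer  # optional
        #       # hostPathType: Directory            # optional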
        {{- if .Values.bgp.enabled }}
        - name: bgp-config-path
          mountPath: /var/lib/cilium/bgp
          readOnly: true
        {{- end }}
        {{- with .Values.operator.extraVolumeMounts }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- with .Values.operator.resources }}
        resources:
          {{- toYaml . | trim | nindent 10 }}
        {{- end }}
        {{- with .Values.operator.securityContext }}
        securityContext:
          {{- toYaml . | trim | nindent 10 }}
        {{- end }}
        terminationMessagePolicy: FallbackToLogsOnError
      hostNetwork: {{ .Values.operator.hostNetwork }}
      {{- if .Values.operator.dnsPolicy }}
      dnsPolicy: {{ .Values.operator.dnsPolicy }}
      {{- end }}
      restartPolicy: Always
      priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.operator.priorityClassName "system-cluster-critical") }}
      serviceAccountName: {{ .Values.serviceAccounts.operator.name | quote }}
      automountServiceAccountToken: {{ .Values.serviceAccounts.operator.automount }}
      {{- with .Values.operator.affinity }}
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        {{- toYaml . | trim | nindent 8 }}
      {{- end }}
      {{- with .Values.operator.topologySpreadConstraints }}
      topologySpreadConstraints:
      {{- range $constraint := . }}
      - {{ toYaml $constraint | nindent 8 | trim }}
        {{- if not $constraint.labelSelector }}
        labelSelector:
          matchLabels:
            io.cilium/app: operator
            name: cilium-operator
        {{- end }}
      {{- end }}
      {{- end }}
      {{- with .Values.operator.nodeSelector }}
      nodeSelector:
        {{- toYaml . | trim | nindent 8 }}
      {{- end }}
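      # With endpoint slice synchronization enabled and a static clustermesh
      # config (and no KVStoreMesh-backed API server), the hostAliases below map
      # each remote cluster's "<name>.<domain>" hostname to its configured IPs so
      # the operator can reach the remote clustermesh-apiservers without DNS.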
      {{- if and .Values.clustermesh.enableEndpointSliceSynchronization .Values.clustermesh.config.enabled (not (and .Values.clustermesh.useAPIServer .Values.clustermesh.apiserver.kvstoremesh.enabled)) }}
      hostAliases:
      {{- range $cluster := .Values.clustermesh.config.clusters }}
      {{- range $ip := $cluster.ips }}
      - ip: {{ $ip }}
        hostnames: [ "{{ $cluster.name }}.{{ $.Values.clustermesh.config.domain }}" ]
      {{- end }}
      {{- end }}
      {{- end }}
      {{- with .Values.operator.tolerations }}
      tolerations:
        {{- toYaml . | trim | nindent 8 }}
      {{- end }}
      volumes:
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
      {{- if .Values.etcd.enabled }}
      # To read the etcd config stored in config maps
      - name: etcd-config-path
        configMap:
          name: cilium-config
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          items:
          - key: etcd-config
            path: etcd.config
      {{- if .Values.etcd.ssl }}
      # To read the k8s etcd secrets in case the user might want to use TLS
      - name: etcd-secrets
        secret:
          secretName: cilium-etcd-secrets
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          optional: true
      {{- end }}
      {{- end }}
      {{- if .Values.kubeConfigPath }}
      - name: kube-config
        hostPath:
          path: {{ .Values.kubeConfigPath }}
          type: FileOrCreate
      {{- end }}
      {{- range .Values.operator.extraHostPathMounts }}
      - name: {{ .name }}
        hostPath:
          path: {{ .hostPath }}
          {{- if .hostPathType }}
          type: {{ .hostPathType }}
          {{- end }}
      {{- end }}
      {{- if .Values.bgp.enabled }}
      - name: bgp-config-path
        configMap:
          name: bgp-config
      {{- end }}
      {{- if .Values.authentication.mutual.spire.enabled }}
      - name: spire-agent-socket
        hostPath:
          path: {{ dir .Values.authentication.mutual.spire.agentSocketPath }}
          type: DirectoryOrCreate
      {{- end }}
      {{- with .Values.operator.extraVolumes }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- if .Values.clustermesh.enableEndpointSliceSynchronization }}
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        projected:
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          sources:
          - secret:
              name: cilium-clustermesh
              optional: true
              # note: items are not explicitly listed here, since the entries of this secret
              # depend on the peers configured, and that would cause a restart of all operators
              # at every addition/removal. Leaving the field empty causes each secret entry
              # to be projected into the volume automatically, as a file whose name is the key.
          - secret:
              name: clustermesh-apiserver-remote-cert
              optional: true
              items:
              - key: tls.key
                path: common-etcd-client.key
              - key: tls.crt
                path: common-etcd-client.crt
              {{- if not .Values.tls.caBundle.enabled }}
              - key: ca.crt
                path: common-etcd-client-ca.crt
              {{- else }}
          - {{ .Values.tls.caBundle.useSecret | ternary "secret" "configMap" }}:
              name: {{ .Values.tls.caBundle.name }}
              optional: true
              items:
              - key: {{ .Values.tls.caBundle.key }}
                path: common-etcd-client-ca.crt
              {{- end }}
          # note: we configure the volume for the kvstoremesh-specific certificate
          # regardless of whether KVStoreMesh is enabled or not, so that it can be
          # automatically mounted in case KVStoreMesh gets subsequently enabled,
          # without requiring an operator restart.
          - secret:
              name: clustermesh-apiserver-local-cert
              optional: true
              items:
              - key: tls.key
                path: local-etcd-client.key
              - key: tls.crt
                path: local-etcd-client.crt
              {{- if not .Values.tls.caBundle.enabled }}
              - key: ca.crt
                path: local-etcd-client-ca.crt
              {{- else }}
          - {{ .Values.tls.caBundle.useSecret | ternary "secret" "configMap" }}:
              name: {{ .Values.tls.caBundle.name }}
              optional: true
              items:
              - key: {{ .Values.tls.caBundle.key }}
                path: local-etcd-client-ca.crt
              {{- end }}
      {{- end }}
{{- end }}