veda/talos/patches/cilium.yaml

cluster:
network:
cni:
name: none
proxy:
disabled: true
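# Note: Talos would otherwise deploy its own default CNI and kube-proxy; with
# both disabled here, the inline Cilium manifest below (a rendered Helm chart
# with kube-proxy-replacement enabled) takes over pod networking and service
# load-balancing.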
inlineManifests:
- name: cilium
contents: |
---
# Source: cilium/templates/cilium-secrets-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-envoy"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-operator"
namespace: kube-system
---
# Source: cilium/templates/hubble-relay/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "hubble-relay"
namespace: kube-system
automountServiceAccountToken: false
---
# Source: cilium/templates/hubble-ui/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "hubble-ui"
namespace: kube-system
---
# Source: cilium/templates/cilium-ca-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: cilium-ca
namespace: kube-system
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU9tQ1Qzc0V4S2wydjNuQzZuVUYvSmt3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURVd01qRTVNRFl4TUZvWERUSTRNRFV3TVRFNQpNRFl4TUZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF2MDkrMlpuaWtVQXQzWm9CTlByY1h6NGFTTXprTzVFY255c1hpWUlFeXE4YkdJNlQKeXBvbXdiRDhQZExuY2xPenVRNXg2eXNQWDZnNjRyY1dUczNZc0RtZUp6WWdBNTRZMFBZTUZ6UWs1TUhMS1ZTZQpRNHZNbHFSRHI4bmRDVDVia3JsWGltZXdxT2pXRWhDRUNud091WmZqZklYdmRuYWRRMmh3TzNKemVRdEZOeHA5CnhsU3BDbGxVT25JVHZVTEJ4VXhGVGNnSE11ekNpSGFZN2dVY2Q0T1NNMnVzWXc2aHdYKzNwQVl3citmUmR1N2EKMXFKUzNmU2RHcHdycVBmR01TVUNod0tSTFpYQ2VpSzAzd0pTN3I5Q1k2aTZUdXlEY1BhNlQxUXlMaHpHV0VMTQo0cWhWdnNSRFNBelpKUWUrTjR4VU9nbFlrbUNJNFF0Y1NvWlFVUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGTXQxb2lwUjVCVFNEMEVERkZjdzVoYUh6cFVFTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlFEcXNQV1FDcDRSTkc5OWJpeWFFaFNvc0lsdmFwY0V2R2JoRk5ieUxOYlBaYkwxOTdXRUQwCldjemJ0TXN1MWFSOVN2Umg4Y3MrUEw5SUluK2grYWJUVzEvakVqSFlNS01qU0Z0Z1JMS0N0aDBmVkc0T0puRzkKV2hubGZuTUl2T3I4Z3pnWU0zZldaRHJSeG82MXBQQjhJN0RWQk9FWXZMdDI2SFVOTGdxTkg0bStPRzdVeEVHMwpwb0xQYnNRNXZUUGQ4V2UyYjNZL092bnloQytVUFZaN292L0d1ZnRlRXFQTEkzQjRHaDRsUmZGYk9ZdmxQRnlHClZ2SzNMTXR1Zmw4aXRXUXZtUHdJQkEzYW92YXFYQUZhMnhUbmcyb2NTV3Y1dGRWb0tLMmd2cU5VK215WFZ6L0sKTGQ2VTZKSFROSHhTWlBVMU1kMHpFSnRZbEI0ZWliUFEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdjA5KzJabmlrVUF0M1pvQk5QcmNYejRhU016a081RWNueXNYaVlJRXlxOGJHSTZUCnlwb213YkQ4UGRMbmNsT3p1UTV4NnlzUFg2ZzY0cmNXVHMzWXNEbWVKellnQTU0WTBQWU1GelFrNU1ITEtWU2UKUTR2TWxxUkRyOG5kQ1Q1YmtybFhpbWV3cU9qV0VoQ0VDbndPdVpmamZJWHZkbmFkUTJod08zSnplUXRGTnhwOQp4bFNwQ2xsVU9uSVR2VUxCeFV4RlRjZ0hNdXpDaUhhWTdnVWNkNE9TTTJ1c1l3Nmh3WCszcEFZd3IrZlJkdTdhCjFxSlMzZlNkR3B3cnFQZkdNU1VDaHdLUkxaWENlaUswM3dKUzdyOUNZNmk2VHV5RGNQYTZUMVF5TGh6R1dFTE0KNHFoVnZzUkRTQXpaSlFlK040eFVPZ2xZa21DSTRRdGNTb1pRVVFJREFRQUJBb0lCQUFWNUxjVGwvVEJ3cUo5RgpiN0ZuVi9TMW55VGtEOGVRZ0VRaGhIdmZwMmx4RnhKZHllYXJBRjdjUkdzMFpJREYya2Q2SFZKQUdpRi9lNmZPCjJ5bUlCQ0lPay9MRlBxYzcxN1BxeGkvSHd5SjBWTzZxeUJoZ29JclN5a2oyT29ld0xBaUhtdlNQd09Na1BqT28KRzA3OWU2SmRzUmwzNzREeEpkQ3U1VDhlS0NXWFd6OHd5cER5YmczTmlxZjNYeWJHcDFGN3RLamZSSk1wWWVUbQowRWlhcCtRMUkxcEh5NXp3YnUwclhxbmJqNS9OUTRBRjFMMDhOTTNxNzd2TTczL09icjhWN0ZuRjcvd1JwV2VTCjF2ZGF5SnUvdmlqUDFrZ3pvL3F3NGhMcmM2ZDAza0p4dTB2d3J1VGV5YmovempLQytVOWNWb3UyUFJwUHYzcXkKdUM5OWYyMENnWUVBOE43cytYK2sxMUsrcVkyMWxhWDZIc0ZUN0NaZGtCUVVrdGhNRDVPZzZBMFFsaUYyWXBhVwp3ZGl4eWpnR3BVZlV3OFFNcTRMazNiejcxK1BhZ0RtTHlVOC9CVXZFWmZQQktiRDZGNm0zVURGTVFqbVNzdExaCnUvTmxRSXh2aERXdlNlNEk4QVZ4Uk4yZ3o1NWZIRE5DOFBrRy9JL2pmUVAwTkpSdnRrc2F4eWNDZ1lFQXkxT3EKd0QvL1NBOVFWSFB2UkZFelV4LzB4MlRkbzI5bnJObTJVa1hyY1E0bUNqWmtjT0lydFJlWVN1cUlzZHdCckN6RApPV3ZwOWtXOEQ0WWdwK3V0TE44VGlDQ1VXcjBXNHlxY0x5TENOWmdncnZ6azNVcGl3MWxuM1hVaVVIVWwyRUpjCkNBMnY2NnEwZ1h0U1h0UmtCaUlJa0ZHMEpwTVY3cWhVemJOSEY4Y0NnWUVBdXVxemlMWG1ESFlZb09qb0ZiWVUKVDlPRXRZZmNLVmQ3MDZ4bjhkcUZEaTQ5UmRPMzA2KzZVN2s5MW9mOW1Ec1N2OHdlYk9ZSmhvMEYvalhDTktqegpjZ2UrQnZVUjltL2U0T1NHWHVDRDhZWWdBSEEvQitnWnNRR2kvQ0UrMUtENFl1MkdJQVNKTG9DeUx3dGVFVHN5CllsOU5iYWFYU1VkTk5aUk8wRzZkM0JjQ2dZRUF5QktFS3JqT2tqRy9jZHJDemc2NDJrclBxb1R1TXl3a2NFbXEKeGpwVyt1QlpQaU5Yb2x4cmRiN0o5SzNqNUdBQXBWTkdwdkl4UGt4UVNWWktPL0lFYmVyMFpUYnB4SC9jVFp4WQpJMm1vZ09RNUJQZUVuTnZkWXl4T3IvUGEyQ0R1SkdibitHUy9mcjV5OEJaUmIwNi9QZW12VTVPU0dZTVNTYzBKCjVYOUxyWEVDZ1lFQXdkYWg0WjVPdm1QVXNZN2dRbzUvc0JTUG1VYVJQL0kzdDVIV0hPMThZa2RqaFlsek4xUUwKRmVnUVgyUXU4OTJMSWFDeEVUeHpEcjVrN3dCdFNvSUtsZ3M0cWs2UjVMTit0M203WDVSKzNiQnYyQkNwT3Z3TQpSbWpLZmFkVTJ4OHlYenZBdjNaQzJJcmxuc3dNNjZ6WmFKVkRVcjkrdWxQNVVacEhUZmdWalo0PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
---
# Source: cilium/templates/hubble/tls-helm/relay-client-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: hubble-relay-client-certs
namespace: kube-system
type: kubernetes.io/tls
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU9tQ1Qzc0V4S2wydjNuQzZuVUYvSmt3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURVd01qRTVNRFl4TUZvWERUSTRNRFV3TVRFNQpNRFl4TUZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF2MDkrMlpuaWtVQXQzWm9CTlByY1h6NGFTTXprTzVFY255c1hpWUlFeXE4YkdJNlQKeXBvbXdiRDhQZExuY2xPenVRNXg2eXNQWDZnNjRyY1dUczNZc0RtZUp6WWdBNTRZMFBZTUZ6UWs1TUhMS1ZTZQpRNHZNbHFSRHI4bmRDVDVia3JsWGltZXdxT2pXRWhDRUNud091WmZqZklYdmRuYWRRMmh3TzNKemVRdEZOeHA5CnhsU3BDbGxVT25JVHZVTEJ4VXhGVGNnSE11ekNpSGFZN2dVY2Q0T1NNMnVzWXc2aHdYKzNwQVl3citmUmR1N2EKMXFKUzNmU2RHcHdycVBmR01TVUNod0tSTFpYQ2VpSzAzd0pTN3I5Q1k2aTZUdXlEY1BhNlQxUXlMaHpHV0VMTQo0cWhWdnNSRFNBelpKUWUrTjR4VU9nbFlrbUNJNFF0Y1NvWlFVUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGTXQxb2lwUjVCVFNEMEVERkZjdzVoYUh6cFVFTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlFEcXNQV1FDcDRSTkc5OWJpeWFFaFNvc0lsdmFwY0V2R2JoRk5ieUxOYlBaYkwxOTdXRUQwCldjemJ0TXN1MWFSOVN2Umg4Y3MrUEw5SUluK2grYWJUVzEvakVqSFlNS01qU0Z0Z1JMS0N0aDBmVkc0T0puRzkKV2hubGZuTUl2T3I4Z3pnWU0zZldaRHJSeG82MXBQQjhJN0RWQk9FWXZMdDI2SFVOTGdxTkg0bStPRzdVeEVHMwpwb0xQYnNRNXZUUGQ4V2UyYjNZL092bnloQytVUFZaN292L0d1ZnRlRXFQTEkzQjRHaDRsUmZGYk9ZdmxQRnlHClZ2SzNMTXR1Zmw4aXRXUXZtUHdJQkEzYW92YXFYQUZhMnhUbmcyb2NTV3Y1dGRWb0tLMmd2cU5VK215WFZ6L0sKTGQ2VTZKSFROSHhTWlBVMU1kMHpFSnRZbEI0ZWliUFEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTVENDQWpHZ0F3SUJBZ0lSQU1tYVdOeXNyWWZqalJERmZyU2dtUWt3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURVd01qRTVNRFl4TVZvWERUSTJNRFV3TWpFNQpNRFl4TVZvd0l6RWhNQjhHQTFVRUF3d1lLaTVvZFdKaWJHVXRjbVZzWVhrdVkybHNhWFZ0TG1sdk1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXFnSmgyOCtkRVJBN2xCbGpiT2FxVjBoNnorRnAKRzlXV2ZUOFRpdTkrbnJmTmIrWk9EQ0NnaVBXNDQ4OFF0VTlhcGM0WWpKY2lheUdrYlhQVUcrK3NvYmNZckFENwpZbklsK0VNcHhLeWNISlpiUCtuNEtKSERPbHVESmtYZG45KzhyZzVmakhwQ2VKenVaOE5DbmUvNGRZUnFtZEVsClpacjJndDNJYTFtOG9WRjJHS3FZT1AvZkNVcDQ0S052cGtyVFkvMjNKMGZGaDBCNUgyWi83bUhZYmtYbjFlYUYKdDZhTTJDYWt2WmQyVWErVWMxOFFsOU1Ld3B0UWNzWS9FN2QxRUxKTVdkZ1hzQVFqUEJwQ0VEeVJjbUpWMlVWcwpsMExweXNNeFhkVndNVDRtZTM1VGgyRkRjMURPY04wZjZXcXg2MDUvSUJ5NS9IbFM1Mjh2SG5LMk9RSURBUUFCCm80R0dNSUdETUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUIKQlFVSEF3SXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCVExkYUlxVWVRVTBnOUJBeFJYTU9ZVwpoODZWQkRBakJnTlZIUkVFSERBYWdoZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d0RRWUpLb1pJCmh2Y05BUUVMQlFBRGdnRUJBSzFBeE4zT0M0Ym12SWVaZHpRd1hQOFI5U2xicVM4SExDcW12UzFyU0pXS3dNa1QKTkIwWFdtUHlSYm5kVFN2U3lsZURZSWY5RmQ1RHhQczd1S3JibWNCTXJsdTlZanE5TGN0ODBSdHBqMElCUElISApDRUg3UE9LaDdXSTJUbkdYbnkrUzYzNktKMWxSRFpvVWZZMWkwRFJWUVczNUM3MEE2SGpyamJhbDVGK1Z5Y3VvCjJnUEJTdXUyMzBicGZPdG5JdGplL1BOelVPL2EwOTZnbzBDa2xFaVRwV1lzYjVZZzBLKzZKK3BGalcvRjFOZ0cKVDJpbmw0QUVlWkIzM3c0WldISHJEQ09MemRVajhPbWlNMzlHYkM2alBjc3R6dk9aT2dEUlBQV3h3T3pwZERSTwo2OHpoZVVtUkNHY0w2cTlVRGc2amt0a3lmOHY0cEI2ZTlyYWpSTEk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcWdKaDI4K2RFUkE3bEJsamJPYXFWMGg2eitGcEc5V1dmVDhUaXU5K25yZk5iK1pPCkRDQ2dpUFc0NDg4UXRVOWFwYzRZakpjaWF5R2tiWFBVRysrc29iY1lyQUQ3WW5JbCtFTXB4S3ljSEpaYlArbjQKS0pIRE9sdURKa1hkbjkrOHJnNWZqSHBDZUp6dVo4TkNuZS80ZFlScW1kRWxaWnIyZ3QzSWExbThvVkYyR0txWQpPUC9mQ1VwNDRLTnZwa3JUWS8yM0owZkZoMEI1SDJaLzdtSFlia1huMWVhRnQ2YU0yQ2FrdlpkMlVhK1VjMThRCmw5TUt3cHRRY3NZL0U3ZDFFTEpNV2RnWHNBUWpQQnBDRUR5UmNtSlYyVVZzbDBMcHlzTXhYZFZ3TVQ0bWUzNVQKaDJGRGMxRE9jTjBmNldxeDYwNS9JQnk1L0hsUzUyOHZIbksyT1FJREFRQUJBb0lCQUJtd2VIY05uUzlKNDZiUwpXMHJHWkRFQ084U1R6QjB2bmlqWTBpd3BXbG5EYm1DZFRXZlp1a2hYMFJOalk2UHlGTUYvTGdoNGdmUHNSbnRmCmlFcWNZZHFoM3pHR2lIWHN4NzRHOEJYTysxdjZnRmxkRW1tVzlRZ3BjZHpqcThTUHUyUVZ1ZXh4UVFBV3ZwT2gKVnJ5azJVZHd0ZWIrU29kYTdpc0NObG02V3AySmdaM0c3M2txQllHWWpLUm1ZWGhiR0k4Ty81ZkdHOSsrQUZEbQppTmJNSkRwMGVtT1F2UWpvcDRMWkRSRzNqMnFrOVArZSsyemJaVllyYVF0U3BMOEJZNC9Cc3ZXdEd0NU96OE1GCjhoQk9vOHk4OHFVTWhpdml4dDJuZXdaVW54Um84UG5pcFhjby9IV0JnY2V0cWRhc1Jud25tRzdWNTR3WUJUTkUKZGpUZGRIRUNnWUVBMVNxZ3JjQmZsSmZHcWdST3hmR0FJZnA0dnI5SFJnOENwLzV1OGxheXJ4QVhSV1QwQ1FNZwo3NzNXd3JJdWRuV0pCb2xWMnQvbmFtekIxTEtja0FkVUI3cG9CL2ZZSWpBTVlDT0tjWWxGVmlDM001aGJqWWlnCmZQY0ZiRmwvYzNDMHR2MXJPdk5HMkQ5cTd2L2liRTVvWmlSS0x3eUI1WUJnWXhlYnlqWkREU1VDZ1lFQXpDdTYKNzNVemQ3Zk5OOURuY21SMWR4MWY0aTQxRm1QbWNKckhsTEZoWUVKdEVYRjJBQS9jVlFTVk40TmVBbW4xU3NQTgpuMmFGWWE4eVA5WGlzaUk2a1hrQVBPYjVkTlI3Y2F1YndWOUVRNklZd2NkVlJvYngrV3NRQ1BVbEVTeGl1aG9yCjAzd0dROWhjZnBtQWQwTGEzcGs3cTMvaWg3VkR2TzRiRU9qUXVvVUNnWUJMcXpZZWQ0dDJVK3BSci9YR2psUE4KdS92Z3VrV3l1T0ZZZ1JraExXV0FmMVhEYkhLMFFFZ1EwbkxKNmpaVUZxVHRqUkxNLzI1R2VSVFZUQklIanBKbApxK1lCN2hqckNlTmF2VU1IeXBLeVlDcW9SZzA1MEtZbnRnaFd6ZTVRTEZINkl6dE5VYzUwTlE3Z2lEVDMvVjFSCjVNL3dvNS80dC9sL000STU1Y2tuZ1FLQmdRQzdjMkxidXhVWDAwNjg4aldzMXdMcDY3UStxd0hlbnJYUnFLbVkKb1Fxd0lBeVhPVDZIYkFoMG5oS0p0emkycXFXa2tRVFdWb0FiWVlpWEJ6NXRJZlAzMzhvU0lGWGpwN24yY1FJOQpsY1BNL0tRU05YcmJsOFhOb3poRG9SNzVGM0ZGUUFZQTB4dkZhbjN4VmVtV2xYRHhjbXBRV01lWXBSbWQrbFlyClBqVjFLUUtCZ1FDYzVRNlJkQU9HS2NLbTVxSXBCMXI2OUdjc1pTWXZhYzg4WTdlanN1VTV6YXhxdG1nM2ZGNTQKTUxnNnN2NmtXVEpMS0ZKZ0RPd2dLQUFCdmpKWHpReWZXTEROb3h0dDRFRzBHN0lKS1cwZVR5azZNTmZzOUgvcwpFbGtBVE0xWG95bDU3TXFyN1ZYVWxtNW9GbzVQZE9OcHZsUDkrRVJGUzYxSUQ0bC9McXJxRFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
---
# Source: cilium/templates/hubble/tls-helm/server-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: hubble-server-certs
namespace: kube-system
type: kubernetes.io/tls
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU9tQ1Qzc0V4S2wydjNuQzZuVUYvSmt3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTURVd01qRTVNRFl4TUZvWERUSTRNRFV3TVRFNQpNRFl4TUZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUF2MDkrMlpuaWtVQXQzWm9CTlByY1h6NGFTTXprTzVFY255c1hpWUlFeXE4YkdJNlQKeXBvbXdiRDhQZExuY2xPenVRNXg2eXNQWDZnNjRyY1dUczNZc0RtZUp6WWdBNTRZMFBZTUZ6UWs1TUhMS1ZTZQpRNHZNbHFSRHI4bmRDVDVia3JsWGltZXdxT2pXRWhDRUNud091WmZqZklYdmRuYWRRMmh3TzNKemVRdEZOeHA5CnhsU3BDbGxVT25JVHZVTEJ4VXhGVGNnSE11ekNpSGFZN2dVY2Q0T1NNMnVzWXc2aHdYKzNwQVl3citmUmR1N2EKMXFKUzNmU2RHcHdycVBmR01TVUNod0tSTFpYQ2VpSzAzd0pTN3I5Q1k2aTZUdXlEY1BhNlQxUXlMaHpHV0VMTQo0cWhWdnNSRFNBelpKUWUrTjR4VU9nbFlrbUNJNFF0Y1NvWlFVUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGTXQxb2lwUjVCVFNEMEVERkZjdzVoYUh6cFVFTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlFEcXNQV1FDcDRSTkc5OWJpeWFFaFNvc0lsdmFwY0V2R2JoRk5ieUxOYlBaYkwxOTdXRUQwCldjemJ0TXN1MWFSOVN2Umg4Y3MrUEw5SUluK2grYWJUVzEvakVqSFlNS01qU0Z0Z1JMS0N0aDBmVkc0T0puRzkKV2hubGZuTUl2T3I4Z3pnWU0zZldaRHJSeG82MXBQQjhJN0RWQk9FWXZMdDI2SFVOTGdxTkg0bStPRzdVeEVHMwpwb0xQYnNRNXZUUGQ4V2UyYjNZL092bnloQytVUFZaN292L0d1ZnRlRXFQTEkzQjRHaDRsUmZGYk9ZdmxQRnlHClZ2SzNMTXR1Zmw4aXRXUXZtUHdJQkEzYW92YXFYQUZhMnhUbmcyb2NTV3Y1dGRWb0tLMmd2cU5VK215WFZ6L0sKTGQ2VTZKSFROSHhTWlBVMU1kMHpFSnRZbEI0ZWliUFEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWakNDQWo2Z0F3SUJBZ0lRTFlYSzJVbmxIS3NvTDdCZS95Y1BNVEFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV3TlRBeU1Ua3dOakV4V2hjTk1qWXdOVEF5TVRrdwpOakV4V2pBcU1TZ3dKZ1lEVlFRRERCOHFMbVJsWm1GMWJIUXVhSFZpWW14bExXZHljR011WTJsc2FYVnRMbWx2Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNWhNQlNtNVNLeWhaRG9udXRoMmoKeHVYOXA2YkdhZUV6L2gvOEthSSthSTJuTHp5L1djeWVKWER6VmMwbjMzQ1JwSFhVYVJuMkNKSTc4YTl2V08zNwpTaFhMMmx3aG5PZldvVmdDbmdNam5yVFdnVTRxRUlkT25jOTNhRWpUQnp3ZWpnL1pON05iZjBvMWhBOXBWUjdaCkFRUWVtZkJyTHBqN0dra25FalBpRWlVN21PcnlucU9ReWV6ZVkyMnQzbXZBN3drSEJjcnVwSC9QQ0hGUE1IUlUKSnoxL04xbFlNbnFxbVJZMGJqMlNrMC9ObU5NN0FKblI0K0FxTnZIQWg1VGw0U1lBME16a0ptekFXemxwbTFUYgpKR2ZPbkgzNTNYb0s3dm93TlNuRWt3QVJpRFZZWDVwenBqamFSNStkOTNjVUE1bkp3cFV2QmdtQy9iMU9VVFJoCmp3SURBUUFCbzRHTk1JR0tNQTRHQTFVZER3RUIvd1FFQXdJRm9EQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQVFZSUt3WUJCUVVIQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUTGRhSXFVZVFVMGc5QgpBeFJYTU9ZV2g4NlZCREFxQmdOVkhSRUVJekFoZ2g4cUxtUmxabUYxYkhRdWFIVmlZbXhsTFdkeWNHTXVZMmxzCmFYVnRMbWx2TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDQjBjQXRPbXh4SFdQcVhyWjRhQUVSM25hT1gyd2EKODVsVHVzZ0RpbTcwbXVhK1pqRXAxU2psME40S2ZtQlJZQXJqcFlvR2xSRTVmOGMwaU1RRzlxMkNpUXZYVHFxLwpTRVcyaS9EaDZGc1BkN1VpUjBqcVNuTGJLRmp6alBNUm5wdUY0VHVBMk9MMmVaQW4rWDE5MWVEQXRQUWRPWXloCjFTbWc4eEkzem54OVEvU3BYb3BsZFFBaFRoN1F2RGJvZnVNcnFvbE9YUytFT2kwaHVZRnVzUnVSMEwyTFZXcEcKOTNoY1ptY1pBcVFlcnprL05hM2QyTi9zOUVzNEVQVnZTcWZiYVpveG9LZm83TWJvTlRWQ3hUdWFxZnQwYTZsQwp0UnhjQ3JWUVJrUDJjWVlMdHIvUHEyUnRYWnRodXhOWVh1TW5BbU9yZmVtYzZ2d3U3U0crZTYyMgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBNWhNQlNtNVNLeWhaRG9udXRoMmp4dVg5cDZiR2FlRXovaC84S2FJK2FJMm5MenkvCldjeWVKWER6VmMwbjMzQ1JwSFhVYVJuMkNKSTc4YTl2V08zN1NoWEwybHdobk9mV29WZ0NuZ01qbnJUV2dVNHEKRUlkT25jOTNhRWpUQnp3ZWpnL1pON05iZjBvMWhBOXBWUjdaQVFRZW1mQnJMcGo3R2trbkVqUGlFaVU3bU9yeQpucU9ReWV6ZVkyMnQzbXZBN3drSEJjcnVwSC9QQ0hGUE1IUlVKejEvTjFsWU1ucXFtUlkwYmoyU2swL05tTk03CkFKblI0K0FxTnZIQWg1VGw0U1lBME16a0ptekFXemxwbTFUYkpHZk9uSDM1M1hvSzd2b3dOU25Fa3dBUmlEVlkKWDVwenBqamFSNStkOTNjVUE1bkp3cFV2QmdtQy9iMU9VVFJoandJREFRQUJBb0lCQVFERFpZS2tIdnVqOFpsbwo4cFlNanpHbjIyNUlzeWd4aGZTdEw2YnVDUCt0dGhnMTB3YkVFamFCbitMWmpPbi9KdEJhZDBQVm14bkwvNzN2CkxjcUlJVjFZZHhoeE0rdk1JVTIwSzFYVnN0N09pOURkY20vaEo3a0o1OGdHaGtyTEN6RDcvOVBBTFpCWWs4TE4KakhVdHVJd2VBOHlsZW56Y3owbjY4Z08vdnRhU0J2YUhva3llK2VxY1Z4VGJEU01rK0QrZkkvS29MbmVJemJuQwppVHJKT3lWVWJYaFA4TjRRQ1BCSEh2VFB6MGcrOVo0WXFRVGJ4U3RnRXpjU1FjYitLeFlIQm5VVlJaWDFHTzRsCll5dUVaSktOcWMwejhUVXA2NEpPY3BBYkR5anFJSUswcUQ1WkU2dnpMVmFXc3hBLy8zRlVUSEVDbUFhNEp0NkkKbG4yYmhZRHBBb0dCQVB3RlUvdS93NVYzRDB1dTJMaVRha1pRS1daU2FQZkt5Qmp1KzRoUnh1U3FZVTZHdnNJcQpmQit5Tjl5UG92U3NDRDBteVJKak5GZTdGdFpxdDIyU1A4UXMxOGR1d3FiTnV5cDdwSTB3THlXNWhwUUMyaFZ5CnNpVm9FMjJrVlNUc1ZXU3IyYjQwWnZOcVhRV3BBVkZ2empRSWJ6SVM0WnNuLzV2eWtzdWs5eWhyQW9HQkFPbTAKOS9YdVdYeW5iOCtzeHRNSk1NM3hNMmw0Vi8ramhFVEYwdS9hY1JSQldvRjFhdUJIeGxGUkRmd0luNzJHQUlvYwpXelB5NVNxQ2dCU2xOUGdiSGRZYVZsUHVvZW9IZFg5T25FUThBd25TRnVvRTY0OW51Qml6VXNlaTFOOUR0NUtrCnFkbzI1SkhqSEh4amNaQ29EenZlcmJQZkc1SEU2TVVHS3BaMW80UnRBb0dCQUxoMnFlY05rSG01SUkvejFac2MKQTJDZDBmK2IrRXJPdVRGTzB2c1BhRFg4NDZaWGlBNlVqQytqQWJKRnpDK3NMNGVhK1BlbGR5MXp1aDZORTk5MgplWWY4anAvaDJVWEFOZXU5Zms3OVlyVUpHNUVBbVdHemh1Tk1UMVFrUVZINlRmS1d1cFMyeEZnTWxUUE1aVE9YClUyWFFlcHM3S0xpcFNUVjdFcWxPQUw0ckFvR0JBTnFzS3FzaGR2Zk5EekNwUUd3c0dFOUtGY25sQWFtL3pUNHcKUDV1UHIraTg4YnRhSzBOcHhTVkhDeWU4eGdLbWhKQzEwWng4OUhKZkl3cXVQaTY3NktIeGlMOEY5Tmd1WElGZgpBNVpROUtMV3dmdjcySkVaQWtlU1dxMVRMWTB1Sm5EK3c3TGJVK2t4TVBFdWR3OWNRTXpMSVlyQkFtSFZkZHNNClZ3THJWYkhOQW9HQUJscFRUOFRMbVFmRXNGekttMEMwQ3JlY1g2UmNRUURjZklnK0tjSzkyNm9TMm5QS2J2NHoKcDB6cDl2US8rT0NyWjg1N3ErT1l5ZCtzNDlPMDc4OVJqbm1KRzAxQzNwaTlHQ3Y5eUF2aG80WVFna3pTTTFzUgo0VjI4K2hNYVlkWkdRT3pVVjZabUV1cXNLMUdLNjkzZldpTS84TkMydXZXcXlhZFhLUGlpREFZPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd", "kvstore" or
# "doublewrite-readkvstore" / "doublewrite-readcrd".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in an etcd kvstore, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
# - "doublewrite" modes store identities in both the kvstore and CRDs. This is useful
# for seamless migrations from the kvstore mode to the crd mode. Consult the
# documentation for more information on how to perform the migration.
identity-allocation-mode: crd
identity-heartbeat-timeout: "30m0s"
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
# If you want to run cilium in debug mode change this value to true
debug: "false"
debug-verbose: ""
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
policy-cidr-match-mode: ""
# If you want metrics enabled in cilium-operator, set the port for
# which the Cilium Operator will have its metrics exposed.
# NOTE that this will open the port on the nodes where Cilium operator pod
# is scheduled.
operator-prometheus-serve-addr: ":9963"
enable-metrics: "true"
enable-policy-secrets-sync: "true"
policy-secrets-only-from-secrets-namespace: "true"
policy-secrets-namespace: "cilium-secrets"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the fewer packets
# will be seen in monitor output.
monitor-aggregation: medium
# The monitor aggregation interval governs the typical time between monitor
# notification events for each allowed connection.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-interval: "5s"
# The monitor aggregation flags determine which TCP flags, upon the first
# observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: "0.0025"
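# As a rough worked example, on a node with 16 GiB of memory a ratio of
# 0.0025 budgets about 0.0025 * 16384 MiB ~= 41 MiB across these maps.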
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-lb-source-range-all-types: "false"
bpf-lb-algorithm-annotation: "false"
bpf-lb-mode-annotation: "false"
bpf-distributed-lru: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
tunnel-source-port-range: "0-0"
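# With routing-mode "tunnel" and tunnel-protocol "vxlan", inter-node pod
# traffic is VXLAN-encapsulated (UDP port 8472 by default), so the underlying
# network does not need routes for each node's pod CIDR.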
service-no-backend-response: "reject"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-tcx: "true"
datapath-mode: "veth"
enable-masquerade-to-route-source: "false"
enable-xt-socket-fallback: "true"
install-no-conntrack-iptables-rules: "false"
iptables-random-fully: "false"
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
enable-local-redirect-policy: "false"
enable-runtime-device-detection: "true"
kube-proxy-replacement: "true"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
nodeport-addresses: ""
enable-health-check-nodeport: "true"
enable-health-check-loadbalancer-ip: "false"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
bpf-lb-acceleration: "disabled"
enable-experimental-lb: "false"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
enable-endpoint-lockdown-on-policy-overflow: "false"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
cni-exclusive: "true"
cni-log-file: "/var/run/cilium/cilium-cni.log"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
health-check-icmp-failure-threshold: "3"
enable-well-known-identities: "false"
enable-node-selector-labels: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
enable-hubble: "true"
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
hubble-export-file-max-size-mb: "10"
hubble-export-file-max-backups: "5"
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: ":4244"
hubble-disable-tls: "false"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
ipam: "kubernetes"
ipam-cilium-node-update-rate: "15s"
default-lb-service-ipam: "lbipam"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
# Enable L2 announcements
enable-l2-announcements: "true"
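# With enable-lb-ipam (set further down) and L2 announcements enabled,
# LoadBalancer Services can get an address from a CiliumLoadBalancerIPPool and
# have it answered via ARP on the local L2 segment; a CiliumL2AnnouncementPolicy
# resource (not part of this manifest) selects which services and interfaces
# take part.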
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
enable-sctp: "false"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "1000"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-xff-num-trusted-hops-ingress: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-connect-timeout: "2"
proxy-initial-fetch-timeout: "30"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
proxy-idle-timeout-seconds: "60"
proxy-max-concurrent-retries: "128"
http-retry-count: "3"
external-envoy-proxy: "true"
envoy-base-id: "0"
envoy-access-log-buffer-size: "4096"
envoy-keep-cap-netbindservice: "false"
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
enable-internal-traffic-policy: "true"
enable-lb-ipam: "true"
enable-non-default-deny-policies: "true"
enable-source-ip-verification: "true"
# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.
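# As a purely illustrative example, a key appended at this point (for instance
# via the chart's extraConfig value), such as:
#   enable-ipv6: "true"
# is intended to override the enable-ipv6 entry defined earlier in this
# ConfigMap, per the note above.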
---
# Source: cilium/templates/cilium-envoy/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-envoy-config
namespace: kube-system
data:
# Keep the key name as bootstrap-config.json to avoid breaking changes
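# In short, this bootstrap config points Envoy's LDS/CDS at the cilium-agent
# xDS unix socket (/var/run/cilium/envoy/sockets/xds.sock), exposes Prometheus
# metrics on 0.0.0.0:9964, and serves /healthz on 127.0.0.1:9878, matching the
# ports the cilium-envoy DaemonSet below scrapes and probes.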
bootstrap-config.json: |
{"admin":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}},"applicationLogConfig":{"logFormat":{"textFormat":"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"}},"bootstrapExtensions":[{"name":"envoy.bootstrap.internal_listener","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"}}],"dynamicResources":{"cdsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"},"ldsConfig":{"apiConfigSource":{"apiType":"GRPC","grpcServices":[{"envoyGrpc":{"clusterName":"xds-grpc-cilium"}}],"setNodeOnFirstMessageOnly":true,"transportApiVersion":"V3"},"initialFetchTimeout":"30s","resourceApiVersion":"V3"}},"node":{"cluster":"ingress-cluster","id":"host~127.0.0.1~no-id~localdomain"},"overloadManager":{"resourceMonitors":[{"name":"envoy.resource_monitors.global_downstream_max_connections","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig","max_active_downstream_connections":"50000"}}]},"staticResources":{"clusters":[{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"egress-cluster","type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"useDownstreamProtocolConfig":{}}}},{"circuitBreakers":{"thresholds":[{"maxRetries":128}]},"cleanupInterval":"2.500s","connectTimeout":"2s","lbPolicy":"CLUSTER_PROVIDED","name":"ingress-cluster-tls","transportSocket":{"name":"cilium.tls_wrapper","typedConfig":{"@type":"type.googleapis.com/cilium.UpstreamTlsWrapperContext"}},"type":"ORIGINAL_DST","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","commonHttpProtocolOptions":{"idleTimeout":"60s","maxConnectionDuration":"0s","maxRequestsPerConnection":0},"upstreamHttpProtocolOptions":{},"useDownstreamProtocolConfi
g":{}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"xds-grpc-cilium","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/xds.sock"}}}}]}]},"name":"xds-grpc-cilium","type":"STATIC","typedExtensionProtocolOptions":{"envoy.extensions.upstreams.http.v3.HttpProtocolOptions":{"@type":"type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions","explicitHttpConfig":{"http2ProtocolOptions":{}}}}},{"connectTimeout":"2s","loadAssignment":{"clusterName":"/envoy-admin","endpoints":[{"lbEndpoints":[{"endpoint":{"address":{"pipe":{"path":"/var/run/cilium/envoy/sockets/admin.sock"}}}}]}]},"name":"/envoy-admin","type":"STATIC"}],"listeners":[{"address":{"socketAddress":{"address":"0.0.0.0","portValue":9964}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtualHosts":[{"domains":["*"],"name":"prometheus_metrics_route","routes":[{"match":{"prefix":"/metrics"},"name":"prometheus_metrics_route","route":{"cluster":"/envoy-admin","prefixRewrite":"/stats/prometheus"}}]}]},"statPrefix":"envoy-prometheus-metrics-listener","streamIdleTimeout":"0s"}}]}],"name":"envoy-prometheus-metrics-listener"},{"address":{"socketAddress":{"address":"127.0.0.1","portValue":9878}},"filterChains":[{"filters":[{"name":"envoy.filters.network.http_connection_manager","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager","httpFilters":[{"name":"envoy.filters.http.router","typedConfig":{"@type":"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"}}],"internalAddressConfig":{"cidrRanges":[{"addressPrefix":"10.0.0.0","prefixLen":8},{"addressPrefix":"172.16.0.0","prefixLen":12},{"addressPrefix":"192.168.0.0","prefixLen":16},{"addressPrefix":"127.0.0.1","prefixLen":32}]},"routeConfig":{"virtual_hosts":[{"domains":["*"],"name":"health","routes":[{"match":{"prefix":"/healthz"},"name":"health","route":{"cluster":"/envoy-admin","prefixRewrite":"/ready"}}]}]},"statPrefix":"envoy-health-listener","streamIdleTimeout":"0s"}}]}],"name":"envoy-health-listener"}]}}
---
# Source: cilium/templates/hubble-relay/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-relay-config
namespace: kube-system
data:
config.yaml: |
cluster-name: default
peer-service: "hubble-peer.kube-system.svc.cluster.local.:443"
listen-address: :4245
gops: true
gops-port: "9893"
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: true
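# Relay dials the per-node Hubble servers over mTLS using the client
# certificate and CA configured above, while its own gRPC API on :4245 is
# served without TLS (disable-server-tls: true).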
---
# Source: cilium/templates/hubble-ui/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-ui-nginx
namespace: kube-system
data:
nginx.conf: "server {\n listen 8081;\n listen [::]:8081;\n server_name localhost;\n root /app;\n index index.html;\n client_max_body_size 1G;\n\n location / {\n proxy_set_header Host $host;\n proxy_set_header X-Real-IP $remote_addr;\n\n location /api {\n proxy_http_version 1.1;\n proxy_pass_request_headers on;\n proxy_pass http://127.0.0.1:8090;\n }\n location / {\n # double `/index.html` is required here \n try_files $uri $uri/ /index.html /index.html;\n }\n\n # Liveness probe\n location /healthz {\n access_log off;\n add_header Content-Type text/plain;\n return 200 'ok';\n }\n }\n}"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- list
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list
- watch
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
- get
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumbgpnodeconfigs
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
- ciliumbgpnodeconfigs/status
verbs:
- patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- cilium-config
verbs:
# allow patching of the configmap to set annotations
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumclusterwidenetworkpolicies
verbs:
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- patch
- apiGroups:
- cilium.io
resources:
- ciliumbgpclusterconfigs/status
- ciliumbgppeerconfigs/status
verbs:
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
- ciliumbgppeerconfigs
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools/status
verbs:
- patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
---
# Source: cilium/templates/hubble-ui/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- "*"
verbs:
- get
- list
- watch
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system
---
# Source: cilium/templates/hubble-ui/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-ui
subjects:
- kind: ServiceAccount
name: "hubble-ui"
namespace: kube-system
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
---
# Source: cilium/templates/cilium-operator/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- update
- patch
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-operator-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-envoy
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9964"
labels:
k8s-app: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
io.cilium/app: proxy
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium-envoy
ports:
- name: envoy-metrics
port: 9964
protocol: TCP
targetPort: envoy-metrics
---
# Source: cilium/templates/hubble-relay/service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
annotations:
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
type: "ClusterIP"
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
port: 80
targetPort: grpc
---
# Source: cilium/templates/hubble-ui/service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-ui
namespace: kube-system
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
type: "ClusterIP"
selector:
k8s-app: hubble-ui
ports:
- name: http
port: 80
targetPort: 8081
---
# Source: cilium/templates/hubble/peer-service.yaml
apiVersion: v1
kind: Service
metadata:
name: hubble-peer
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: hubble-peer
spec:
selector:
k8s-app: cilium
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
internalTrafficPolicy: Local
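# The 443 -> 4244 mapping matches hubble-listen-address in cilium-config, and
# internalTrafficPolicy: Local means in-cluster consumers of this Service
# (such as Hubble Relay's peer-service above) are routed to the agent running
# on their own node.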
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-agent
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
# Unprivileged containers need to mount /proc/sys/net from the host
# to have write access
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
# Unprivileged containers need to mount /proc/sys/kernel from the host
# to have write access
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- name: bpf-maps
mountPath: /sys/fs/bpf
# Unprivileged containers can't set mount propagation to bidirectional,
# so in this case we mount the bpf fs from an init container that
# is privileged and set the mount propagation from host to container
# in Cilium.
mountPropagation: HostToContainer
# Check for duplicate mounts before mounting
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
- name: cilium-run
mountPath: /var/run/cilium
- name: cilium-netns
mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
- name: tmp
mountPath: /tmp
initContainers:
- name: config
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePolicy: FallbackToLogsOnError
- name: apply-sysctl-overwrites
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
# Mount the bpf fs if it is not mounted. We will perform this task
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
command:
- /bin/bash
- -c
- --
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
name: cilium-config
key: write-cni-conf-when-ready
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "quay.io/cilium/cilium:v1.17.3@sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin # .Values.cni.install
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
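# A bare "Exists" toleration (no key) tolerates every taint, so the agent can
# run on control-plane nodes and on nodes that are still marked not-ready.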
volumes:
# For sharing configuration between the "config" initContainer and the agent
- name: tmp
emptyDir: {}
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To exec into pod network namespaces
- name: cilium-netns
hostPath:
path: /var/run/netns
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
# To mount cgroup2 filesystem on the host or apply sysctlfix
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: /sys/fs/cgroup
type: DirectoryOrCreate
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Sharing socket with Cilium Envoy on the same node by using a host path
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: cilium-clustermesh
optional: true
# note: items are not explicitly listed here, since the entries of this secret
# depend on the peers configured, and that would cause a restart of all agents
# at every addition/removal. Leaving the field empty causes each secret entry
# to be automatically projected into the volume as a file whose name is the key.
- secret:
name: clustermesh-apiserver-remote-cert
optional: true
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
# note: we configure the volume for the kvstoremesh-specific certificate
# regardless of whether KVStoreMesh is enabled or not, so that it can be
# automatically mounted in case KVStoreMesh gets subsequently enabled,
# without requiring an agent restart.
- secret:
name: clustermesh-apiserver-local-cert
optional: true
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
type: Directory
- name: host-proc-sys-kernel
hostPath:
path: /proc/sys/kernel
type: Directory
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: cilium-envoy
name: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-envoy
image: "quay.io/cilium/cilium-envoy:v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf@sha256:a01cadf7974409b5c5c92ace3d6afa298408468ca24cab1cb413c04f89d3d1f9"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
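# Arguments after '--' are forwarded by cilium-envoy-starter to the Envoy process itself.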
args:
- '--'
- '-c /var/run/cilium/envoy/bootstrap-config.json'
- '--base-id 0'
- '--log-level info'
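# All probes target the Envoy health listener on 127.0.0.1:9878, which is
# reachable from the node directly because the pod runs with hostNetwork: true.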
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
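# Talos-specific override: localhost:7445 is the KubePrism endpoint that
# load-balances requests across the cluster's API servers. Using it avoids
# relying on the in-cluster Service VIP, since kube-proxy is disabled here.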
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
ports:
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
- name: envoy-artifacts
mountPath: /var/run/cilium/envoy/artifacts
readOnly: true
- name: envoy-config
mountPath: /var/run/cilium/envoy/
readOnly: true
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium-envoy"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
- name: envoy-artifacts
hostPath:
path: "/var/run/cilium/envoy/artifacts"
type: DirectoryOrCreate
- name: envoy-config
configMap:
name: "cilium-envoy-config"
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: bootstrap-config.json
path: bootstrap-config.json
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
replicas: 2
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
# Ensure the operator can update on single-node k8s clusters by using a rolling update
# with maxUnavailable=100% when there is one replica and no user-configured Recreate strategy.
# Otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
# podAntiAffinity, which prevents multiple operator replicas from being scheduled on the same node.
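# With replicas: 2, maxUnavailable: 50% lets one operator pod be replaced at a
# time (50% of 2 = 1), so upgrades can proceed despite the podAntiAffinity below.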
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 50%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.17.3@sha256:8bd38d0e97a955b2d725929d60df09d712fb62b60b930551a29abac2dd92e597"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
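# Same Talos KubePrism API server endpoint as used by the Cilium components above.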
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
ports:
- name: prometheus
containerPort: 9963
hostPort: 9963
protocol: TCP
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 5
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccountName: "cilium-operator"
automountServiceAccountToken: true
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
---
# Source: cilium/templates/hubble-relay/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
namespace: kube-system
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
securityContext:
fsGroup: 65532
containers:
- name: hubble-relay
securityContext:
capabilities:
drop:
- ALL
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
image: "quay.io/cilium/hubble-relay:v1.17.3@sha256:f8674b5139111ac828a8818da7f2d344b4a5bfbaeb122c5dc9abed3e74000c55"
imagePullPolicy: IfNotPresent
command:
- hubble-relay
args:
- serve
ports:
- name: grpc
containerPort: 4245
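# Port 4222 is assumed to be relay's dedicated gRPC health-check listener;
# the flows API itself is served on the grpc port (4245) above.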
readinessProbe:
grpc:
port: 4222
timeoutSeconds: 3
# The livenessProbe will kill the pod, so we should be very conservative
# about failures here: killing the pod should be a last resort, and we
# should give relay enough time to retry before it is killed.
livenessProbe:
grpc:
port: 4222
timeoutSeconds: 10
# Give relay time to establish connections and make a few retries
# before starting livenessProbes.
initialDelaySeconds: 10
# 10 seconds * 12 failures = 2 minutes of failure.
# If relay cannot become healthy after 2 minutes, then killing it
# might resolve whatever issue is occurring.
#
# 10 seconds is a reasonable retry period so we can see if it's
# failing regularly or only sporadically.
periodSeconds: 10
failureThreshold: 12
startupProbe:
grpc:
port: 4222
# Give relay time to get its certs, establish connections, and make a few
# retries before the startup probe starts counting failures.
initialDelaySeconds: 10
# 20 * 3 seconds = 1 minute of failure before we consider startup as failed.
failureThreshold: 20
# Retry more frequently at startup so that it can be considered started more quickly.
periodSeconds: 3
volumeMounts:
- name: config
mountPath: /etc/hubble-relay
readOnly: true
- name: tls
mountPath: /var/lib/hubble-relay/tls
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
restartPolicy: Always
priorityClassName:
serviceAccountName: "hubble-relay"
automountServiceAccountToken: false
terminationGracePeriodSeconds: 1
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
volumes:
- name: config
configMap:
name: hubble-relay-config
items:
- key: config.yaml
path: config.yaml
- name: tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
- key: ca.crt
path: hubble-server-ca.crt
---
# Source: cilium/templates/hubble-ui/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: hubble-ui
namespace: kube-system
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-ui
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
securityContext:
fsGroup: 1001
runAsGroup: 1001
runAsUser: 1001
priorityClassName:
serviceAccountName: "hubble-ui"
automountServiceAccountToken: true
containers:
- name: frontend
image: "quay.io/cilium/hubble-ui:v0.13.2@sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8081
livenessProbe:
httpGet:
path: /healthz
port: 8081
readinessProbe:
httpGet:
path: /
port: 8081
volumeMounts:
- name: hubble-ui-nginx-conf
mountPath: /etc/nginx/conf.d/default.conf
subPath: nginx.conf
- name: tmp-dir
mountPath: /tmp
terminationMessagePolicy: FallbackToLogsOnError
- name: backend
image: "quay.io/cilium/hubble-ui-backend:v0.13.2@sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15"
imagePullPolicy: IfNotPresent
env:
- name: EVENTS_SERVER_PORT
value: "8090"
- name: FLOWS_API_ADDR
value: "hubble-relay:80"
ports:
- name: grpc
containerPort: 8090
terminationMessagePolicy: FallbackToLogsOnError
nodeSelector:
kubernetes.io/os: linux
volumes:
- configMap:
defaultMode: 420
name: hubble-ui-nginx
name: hubble-ui-nginx-conf
- emptyDir: {}
name: tmp-dir