diff --git a/charts/opentelemetry-collector/CONTRIBUTING.md b/charts/opentelemetry-collector/CONTRIBUTING.md index e2dd7d2b0..aa0bf2663 100644 --- a/charts/opentelemetry-collector/CONTRIBUTING.md +++ b/charts/opentelemetry-collector/CONTRIBUTING.md @@ -1,8 +1,12 @@ # Collector Chart Contributing Guide +All changes to the chart require a bump to the version in `Chart.yaml`. See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements. + +Once the chart version is bumped, the examples must be regenerated by running `make generate-examples CHARTS=opentelemetry-collector`. + ## Bumping Default Collector Version 1. Increase the minor version of the chart by one and set the patch version to zero. 2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default. 3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly. -4. Run `make generate-examples`. \ No newline at end of file +4. Run `make generate-examples CHARTS=opentelemetry-collector`. diff --git a/charts/opentelemetry-collector/Chart.yaml b/charts/opentelemetry-collector/Chart.yaml index 3b8babaf6..0a6f30e85 100644 --- a/charts/opentelemetry-collector/Chart.yaml +++ b/charts/opentelemetry-collector/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: opentelemetry-collector -version: 0.70.1 +version: 0.71.0 description: OpenTelemetry Collector Helm chart for Kubernetes type: application home: https://opentelemetry.io/ @@ -12,4 +12,4 @@ sources: icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png maintainers: - name: povilasv -appVersion: 0.81.0 +appVersion: 0.83.0 diff --git a/charts/opentelemetry-collector/README.md b/charts/opentelemetry-collector/README.md index 59e57aadd..0be258a10 100644 --- a/charts/opentelemetry-collector/README.md +++ b/charts/opentelemetry-collector/README.md @@ -175,7 +175,7 @@ The collector can be configured to collects cluster-level metrics from the Kuber This feature is disabled by default. It has the following requirements: - It requires [k8sclusterreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sclusterreceiver) to be included in the collector, such as [contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) version of the collector image. -- It requires statefulset or deployment mode with a signle replica. +- It requires statefulset or deployment mode with a single replica. To enable this feature, set the `presets.clusterMetrics.enabled` property to `true`. diff --git a/charts/opentelemetry-collector/UPGRADING.md b/charts/opentelemetry-collector/UPGRADING.md index 92187dfda..064d9389b 100644 --- a/charts/opentelemetry-collector/UPGRADING.md +++ b/charts/opentelemetry-collector/UPGRADING.md @@ -1,5 +1,14 @@ # Upgrade guidelines +These upgrade guidelines only contain instructions for version upgrades that require manual modifications on the user's side. +If the version you want to upgrade to is not listed here, there is nothing you need to do.
+Just upgrade and enjoy. + +## 0.62 to 0.63 + +The `kubernetesAttributes` preset now respects the order of processors in the logs, metrics and traces pipelines. +This may break your pipelines if you relied on the `k8sattributes` processor being rendered as the first processor while also listing it explicitly elsewhere in the signal's pipeline. + ## 0.55.2 to 0.56 The `tpl` function has been added to references of pod labels and ingress hosts. This adds the ability to add some reusability in @@ -38,7 +47,8 @@ As of v0.54.0 Collector chart, the default resource limits are removed. If you w ``` resources: limits: - cpu: 256m + # CPU is measured in millicores (1000m = 1 core); memory in binary units (Mi) + cpu: 250m memory: 512Mi ``` diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrole.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrole.yaml index 04e490f47..4ecd8ca30 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrole.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrole.yaml @@ -5,10 +5,10 @@ kind: ClusterRole metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm rules: - apiGroups: [""] diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrolebinding.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrolebinding.yaml index 7f80abfa6..3d0ec433d 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrolebinding.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/clusterrolebinding.yaml @@ -5,10 +5,10 @@ kind: ClusterRoleBinding metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml index e7c0dd93d..19eb32724 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml index 298050de6..7f5592527 100644 ---
a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml index 936123ed3..d3aa6053c 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: 9afdc4232355af0e9611ba8e323cc8abc04f1319755b5987ab98b9d7cc968780 + checksum/config: fea94006711e3ea281444424ef285096ea6e4dd8b8f4b9f271980ee550f0c71e labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml index 6a1d71406..b7bb8760a 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: 1bf6bb08b9d58489b954e01f68c16917b1315518c920ab02a295471986245b8d + checksum/config: 1b343b804ab9820604b361a3e67f9c4ed8adc5843e4d6b7f163f443b50bf336f labels: app.kubernetes.io/name: opentelemetry-collector @@ -42,9 +42,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml index 
3be441e25..6cb98e30d 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml @@ -5,15 +5,15 @@ kind: Service metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: type: ClusterIP - ports: + ports: - name: jaeger-compact port: 6831 diff --git a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml index ea66c4f70..7444af7db 100644 --- a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml index 162009052..e2507a0f7 100644 --- a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: f28a32619bda74ce1925837002d78c7c92f2084a2278431f9736008e385c7353 + checksum/config: 1e90988584dfeaa687ae82210ec0ed112a579c45ca060b352f2acad6425f83f4 labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: 
"otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml index d605fe083..0ba6ad0b0 100644 --- a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml index edde68de2..cf92fdaf9 100644 --- a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: 8aa26522f8adbb5938a527c79e098322fc6d9e964ef2b533abdd221aee8b8331 + checksum/config: 84a6c7eae469b2fb5718dfb4dc686dae09d36d816bc6580d24ef959b1aa9c436 labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- 
a/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml index d605fe083..0ba6ad0b0 100644 --- a/charts/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml index 68863cdb5..784fd17e1 100644 --- a/charts/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: 8aa26522f8adbb5938a527c79e098322fc6d9e964ef2b533abdd221aee8b8331 + checksum/config: 84a6c7eae469b2fb5718dfb4dc686dae09d36d816bc6580d24ef959b1aa9c436 labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git 
a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrole.yaml b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrole.yaml index a7e7ddc51..b8ebd2c51 100644 --- a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrole.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrole.yaml @@ -5,10 +5,10 @@ kind: ClusterRole metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm rules: - apiGroups: [""] diff --git a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrolebinding.yaml b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrolebinding.yaml index 7f80abfa6..3d0ec433d 100644 --- a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrolebinding.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/clusterrolebinding.yaml @@ -5,10 +5,10 @@ kind: ClusterRoleBinding metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/configmap-agent.yaml index ec3d008d5..717210de9 100644 --- a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | @@ -68,7 +68,7 @@ data: type: regex_parser - combine_field: attributes.log combine_with: "" - id: cri-containerd-recombine + id: containerd-recombine is_last_entry: attributes.logtag == 'F' max_log_size: 1048576 output: extract_metadata_from_filepath diff --git a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/daemonset.yaml index 0b60ec469..a0cf7052a 100644 --- a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: 
metadata: annotations: - checksum/config: e4d15741730a2e70fc553f4f3fc08214780f87739d8b89fce84d36eb19ffc26d + checksum/config: b108ec2613d9222514329117b89a686c645633281e1de2acebab5b75981be3be labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/daemonset-presets/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-presets/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/configmap-agent.yaml b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/configmap-agent.yaml index c460e5634..399e1556b 100644 --- a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/configmap-agent.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | @@ -64,7 +64,7 @@ data: type: regex_parser - combine_field: attributes.log combine_with: "" - id: cri-containerd-recombine + id: containerd-recombine is_last_entry: attributes.logtag == 'F' max_log_size: 1048576 output: extract_metadata_from_filepath diff --git a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/daemonset.yaml b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/daemonset.yaml index 6887c12b6..4413ffe7b 100644 --- a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/daemonset.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-opentelemetry-collector-agent labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: 0b066b6dd7111abed0b750929bd6f42bb4a80988def6838d89a58437674cfc02 + checksum/config: 6fd86fb51096cfb77fdcdac95332933258fa903b3f04769a583f246d16d49c13 labels: app.kubernetes.io/name: opentelemetry-collector @@ -40,9 +40,10 @@ spec: - --config=C:\\conf\relay.yaml securityContext: {} - image: 
"otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/daemonset-windows/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/daemonset-windows/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml b/charts/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml index 614d36ed9..2ffef1d17 100644 --- a/charts/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml +++ b/charts/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml b/charts/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml index a6284c84c..d0604c91e 100644 --- a/charts/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml +++ b/charts/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: replicas: 3 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: a5ab63431af27bd65bd2c7bc6495ce0fad951b88a7c2bc37af73cc3b8b4cfab4 + checksum/config: 301008b1fb3b47211a2fac5de132a0082d36c8ec125ee6c66785b83e93195351 labels: app.kubernetes.io/name: opentelemetry-collector @@ -42,9 +42,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/deployment-only/rendered/service.yaml b/charts/opentelemetry-collector/examples/deployment-only/rendered/service.yaml index 3be441e25..6cb98e30d 100644 --- a/charts/opentelemetry-collector/examples/deployment-only/rendered/service.yaml +++ b/charts/opentelemetry-collector/examples/deployment-only/rendered/service.yaml @@ -5,15 +5,15 @@ kind: Service metadata: name: 
example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: type: ClusterIP - ports: + ports: - name: jaeger-compact port: 6831 diff --git a/charts/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml index d4712263f..94868d6db 100644 --- a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml +++ b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml index a72d7edb2..dcb9afb2b 100644 --- a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml +++ b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: 341416f8bbe5e00c1fc015b43e167899b49beb476d22f4c36b2d67e20e1b34fa + checksum/config: 3a2964cb56556d7729e7d2e953e9db00b82c3481190babdd39d6a826fc7f49eb labels: app.kubernetes.io/name: opentelemetry-collector @@ -42,9 +42,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: otlp containerPort: 4317 protocol: TCP diff --git a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml 
b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml index d2fd749fd..994aeebd9 100644 --- a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml +++ b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml @@ -5,15 +5,15 @@ kind: Service metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: type: ClusterIP - ports: + ports: - name: otlp port: 4317 diff --git a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml index 32d30e5c5..126ce479d 100644 --- a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml +++ b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -42,9 +42,10 @@ spec: - --config=/conf/config.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml index 3be441e25..6cb98e30d 100644 --- a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml +++ b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml @@ -5,15 +5,15 @@ kind: Service metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm 
component: standalone-collector spec: type: ClusterIP - ports: + ports: - name: jaeger-compact port: 6831 diff --git a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml new file mode 100644 index 000000000..b8ebd2c51 --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml @@ -0,0 +1,22 @@ +--- +# Source: opentelemetry-collector/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: [""] + resources: ["pods", "namespaces"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml new file mode 100644 index 000000000..3d0ec433d --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: opentelemetry-collector/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-collector +subjects: +- kind: ServiceAccount + name: example-opentelemetry-collector + namespace: default diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml new file mode 100644 index 000000000..61217d3a4 --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml @@ -0,0 +1,120 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: 
opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm +data: + relay: | + exporters: + logging: {} + extensions: + health_check: {} + memory_ballast: + size_in_percentage: 40 + processors: + batch: {} + k8sattributes: + extract: + metadata: + - k8s.namespace.name + - k8s.replicaset.name + - k8s.statefulset.name + - k8s.daemonset.name + - k8s.cronjob.name + - k8s.job.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.uid + - k8s.pod.start_time + passthrough: false + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + transform/k8s_attributes: + metric_statements: + - context: resource + statements: + - set(attributes["k8s.deployment.name"], attributes["k8s.replicaset.name"]) + - replace_pattern(attributes["k8s.deployment.name"], "^(.*)-[0-9a-zA-Z]+$", + "$$1") + - delete_key(attributes, "k8s.replicaset.name") + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + - memory_ballast + pipelines: + logs: + exporters: + - logging + processors: + - k8sattributes + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - logging + processors: + - k8sattributes + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - logging + processors: + - resource + - k8sattributes + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml new file mode 100644 index 000000000..37ffae847 --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml @@ -0,0 +1,93 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: df1e6761f057e6bd01ca995df479fd2ef89a3ac3074e05a5061926af91e424c9 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + 
- /otelcol-contrib + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-contrib:0.83.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + nodeSelector: + kubernetes.io/os: linux + hostNetwork: false diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml new file mode 100644 index 000000000..6cb98e30d --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml @@ -0,0 +1,47 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml new file mode 100644 index 000000000..84ee00095 --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.71.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.83.0" + app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/kubernetesAttributes/values.yaml b/charts/opentelemetry-collector/examples/kubernetesAttributes/values.yaml new file mode 100644 index 000000000..0a84d08c6 --- /dev/null +++ b/charts/opentelemetry-collector/examples/kubernetesAttributes/values.yaml @@ -0,0 +1,15 @@ +mode: deployment + +presets: + kubernetesAttributes: + enabled: true + +config: 
+ service: + pipelines: + traces: + processors: + - resource + - k8sattributes + - batch + \ No newline at end of file diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole-targetallocator.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole-targetallocator.yaml index 8720f16d8..39b94736a 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole-targetallocator.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole-targetallocator.yaml @@ -5,10 +5,10 @@ kind: ClusterRole metadata: name: example-opentelemetry-collector-targetallocator labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm rules: - apiGroups: [""] diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole.yaml index a7e7ddc51..b8ebd2c51 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrole.yaml @@ -5,10 +5,10 @@ kind: ClusterRole metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm rules: - apiGroups: [""] diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding-targetallocator.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding-targetallocator.yaml index 7bd6ee8a7..5be5c278a 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding-targetallocator.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding-targetallocator.yaml @@ -5,10 +5,10 @@ kind: ClusterRoleBinding metadata: name: example-opentelemetry-collector-targetallocator labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding.yaml index 7540765e2..d7dcea9d8 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/clusterrolebinding.yaml @@ -5,10 +5,10 @@ kind: ClusterRoleBinding metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: 
opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/integrations/mysql/opentelemetrycollector-sidecar.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/integrations/mysql/opentelemetrycollector-sidecar.yaml index 97cd2f996..29fc021bb 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/integrations/mysql/opentelemetrycollector-sidecar.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/integrations/mysql/opentelemetrycollector-sidecar.yaml @@ -6,7 +6,7 @@ metadata: name: example-opentelemetry-collector-mysql-logs-sidecar spec: mode: sidecar - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" volumeMounts: - mountPath: /var/lib/mysql name: data diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/opentelemetrycollector.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/opentelemetrycollector.yaml index 64457b212..28b07ae57 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/opentelemetrycollector.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/opentelemetrycollector.yaml @@ -5,10 +5,10 @@ kind: OpenTelemetryCollector metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: mode: statefulset @@ -22,7 +22,7 @@ spec: podSecurityContext: runAsUser: 0 runAsGroup: 0 - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: "otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: - name: jaeger-compact @@ -140,7 +140,7 @@ spec: type: regex_parser - combine_field: attributes.log combine_with: "" - id: cri-containerd-recombine + id: containerd-recombine is_last_entry: attributes.logtag == 'F' max_log_size: 1048576 output: extract_metadata_from_filepath diff --git a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/serviceaccount-targetallocator.yaml b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/serviceaccount-targetallocator.yaml index 50707b506..5ba1d9bcd 100644 --- a/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/serviceaccount-targetallocator.yaml +++ b/charts/opentelemetry-collector/examples/opentelemetrycollector-crd/rendered/serviceaccount-targetallocator.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: default-targetallocator labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml b/charts/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml index 
6d9558652..2a9c025a6 100644 --- a/charts/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml +++ b/charts/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-opentelemetry-collector-statefulset labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml b/charts/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml index 3fe0fa955..5445abdd2 100644 --- a/charts/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml +++ b/charts/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml @@ -5,15 +5,15 @@ kind: Service metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm component: statefulset-collector spec: type: ClusterIP - ports: + ports: - name: jaeger-compact port: 6831 diff --git a/charts/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml b/charts/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml index aea428fa1..84ee00095 100644 --- a/charts/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml b/charts/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml index f99816c28..0c5bfbb80 100644 --- a/charts/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml +++ b/charts/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml @@ -5,10 +5,10 @@ kind: StatefulSet metadata: name: example-opentelemetry-collector labels: - helm.sh/chart: opentelemetry-collector-0.70.1 + helm.sh/chart: opentelemetry-collector-0.71.0 app.kubernetes.io/name: opentelemetry-collector app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.83.0" app.kubernetes.io/managed-by: Helm spec: serviceName: example-opentelemetry-collector @@ -24,7 +24,7 @@ spec: template: metadata: annotations: - checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/config: 42e4f42af502a314d80db7bb26239333d8ef52af1bfb9eaef672678a3786499e labels: app.kubernetes.io/name: opentelemetry-collector @@ -43,9 +43,10 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.81.0" + image: 
"otel/opentelemetry-collector-contrib:0.83.0" imagePullPolicy: IfNotPresent ports: + - name: jaeger-compact containerPort: 6831 protocol: UDP diff --git a/charts/opentelemetry-collector/templates/_config.tpl b/charts/opentelemetry-collector/templates/_config.tpl index 93261d03a..8b4df3704 100644 --- a/charts/opentelemetry-collector/templates/_config.tpl +++ b/charts/opentelemetry-collector/templates/_config.tpl @@ -254,7 +254,7 @@ receivers: parse_from: attributes.time layout: '%Y-%m-%dT%H:%M:%S.%LZ' - type: recombine - id: cri-containerd-recombine + id: containerd-recombine output: extract_metadata_from_filepath combine_field: attributes.log source_identifier: attributes["log.file.path"] @@ -431,13 +431,13 @@ receivers: {{- define "opentelemetry-collector.applyKubernetesAttributesConfig" -}} {{- $config := mustMergeOverwrite (include "opentelemetry-collector.kubernetesAttributesConfig" .Values | fromYaml) .config }} -{{- if $config.service.pipelines.logs }} +{{- if and ($config.service.pipelines.logs) (not (has "k8sattributes" $config.service.pipelines.logs.processors)) }} {{- $_ := set $config.service.pipelines.logs "processors" (prepend $config.service.pipelines.logs.processors "k8sattributes" | uniq) }} {{- end }} -{{- if $config.service.pipelines.metrics }} +{{- if and ($config.service.pipelines.metrics) (not (has "k8sattributes" $config.service.pipelines.metrics.processors)) }} {{- $_ := set $config.service.pipelines.metrics "processors" (prepend $config.service.pipelines.metrics.processors "k8sattributes" | uniq) }} {{- end }} -{{- if $config.service.pipelines.traces }} +{{- if and ($config.service.pipelines.traces) (not (has "k8sattributes" $config.service.pipelines.traces.processors)) }} {{- $_ := set $config.service.pipelines.traces "processors" (prepend $config.service.pipelines.traces.processors "k8sattributes" | uniq) }} {{- end }} {{- $config | toYaml }} @@ -481,8 +481,8 @@ processors: - delete_key(attributes, "k8s.replicaset.name") {{- end }} -{{/* Build the list of port for deployment service */}} -{{- define "opentelemetry-collector.deploymentPortsConfig" -}} +{{/* Build the list of port for service */}} +{{- define "opentelemetry-collector.servicePortsConfig" -}} {{- $ports := deepCopy .Values.ports }} {{- range $key, $port := $ports }} {{- if $port.enabled }} @@ -500,6 +500,21 @@ processors: {{- end }} {{- end }} +{{/* Build the list of port for pod */}} +{{- define "opentelemetry-collector.podPortsConfig" -}} +{{- $ports := deepCopy .Values.ports }} +{{- range $key, $port := $ports }} +{{- if $port.enabled }} +- name: {{ $key }} + containerPort: {{ $port.containerPort }} + protocol: {{ $port.protocol }} + {{- if and $.isAgent $port.hostPort }} + hostPort: {{ $port.hostPort }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} + {{- define "opentelemetry-collector.applyKubernetesEventsConfig" -}} {{- $config := mustMergeOverwrite (include "opentelemetry-collector.kubernetesEventsConfig" .Values | fromYaml) .config }} {{- $_ := set $config.service.pipelines.logs "receivers" (append $config.service.pipelines.logs.receivers "k8sobjects" | uniq) }} diff --git a/charts/opentelemetry-collector/templates/_helpers.tpl b/charts/opentelemetry-collector/templates/_helpers.tpl index ce020be06..432370f96 100644 --- a/charts/opentelemetry-collector/templates/_helpers.tpl +++ b/charts/opentelemetry-collector/templates/_helpers.tpl @@ -147,8 +147,10 @@ Check if logs collection is enabled via deprecated "containerLogs" or "preset.lo Compute Service creation on mode */}} {{- define 
"opentelemetry-collector.serviceEnabled" }} - {{- $serviceEnabled := true -}} - + {{- $serviceEnabled := true }} + {{- if not (eq (toString .Values.service.enabled) "") }} + {{- $serviceEnabled = .Values.service.enabled -}} + {{- end }} {{- if or (and (eq .Values.mode "daemonset") (not .Values.service.enabled)) (.Values.collectorCRD.generate) }} {{- $serviceEnabled = false -}} {{- end }} diff --git a/charts/opentelemetry-collector/templates/_pod.tpl b/charts/opentelemetry-collector/templates/_pod.tpl index 86d6ead85..0a329691f 100644 --- a/charts/opentelemetry-collector/templates/_pod.tpl +++ b/charts/opentelemetry-collector/templates/_pod.tpl @@ -36,17 +36,12 @@ containers: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} + + {{- $ports := include "opentelemetry-collector.podPortsConfig" . }} + {{- if $ports }} ports: - {{- range $key, $port := .Values.ports }} - {{- if $port.enabled }} - - name: {{ $key }} - containerPort: {{ $port.containerPort }} - protocol: {{ $port.protocol }} - {{- if and $.isAgent $port.hostPort }} - hostPort: {{ $port.hostPort }} - {{- end }} - {{- end }} - {{- end }} + {{- $ports | nindent 6}} + {{- end }} env: - name: MY_POD_IP valueFrom: @@ -62,6 +57,10 @@ containers: {{- with .Values.extraEnvs }} {{- . | toYaml | nindent 6 }} {{- end }} + {{- with .Values.extraEnvsFrom }} + envFrom: + {{- . | toYaml | nindent 6 }} + {{- end }} {{- if .Values.lifecycleHooks }} lifecycle: {{- toYaml .Values.lifecycleHooks | nindent 6 }} diff --git a/charts/opentelemetry-collector/templates/clusterrole.yaml b/charts/opentelemetry-collector/templates/clusterrole.yaml index 84e069c53..c53401dbb 100644 --- a/charts/opentelemetry-collector/templates/clusterrole.yaml +++ b/charts/opentelemetry-collector/templates/clusterrole.yaml @@ -54,6 +54,6 @@ rules: {{- if .Values.presets.kubernetesEvents.enabled }} - apiGroups: ["events.k8s.io"] resources: ["events"] - verbs: ["watch"] + verbs: ["watch", "list"] {{- end }} {{- end }} diff --git a/charts/opentelemetry-collector/templates/service.yaml b/charts/opentelemetry-collector/templates/service.yaml index 7a9e16833..73fe9113a 100644 --- a/charts/opentelemetry-collector/templates/service.yaml +++ b/charts/opentelemetry-collector/templates/service.yaml @@ -26,9 +26,16 @@ spec: - {{ . }} {{- end }} {{- end }} - ports: {{ include "opentelemetry-collector.deploymentPortsConfig" . | nindent 4 }} + {{- $ports := include "opentelemetry-collector.servicePortsConfig" . }} + {{- if $ports }} + ports: + {{- $ports | nindent 4}} + {{- end }} selector: {{- include "opentelemetry-collector.selectorLabels" . | nindent 4 }} {{- include "opentelemetry-collector.component" . | nindent 4 }} internalTrafficPolicy: {{ include "opentelemetry-collector.serviceInternalTrafficPolicy" . }} + {{- if and (eq .Values.service.type "LoadBalancer") (.Values.service.externalTrafficPolicy) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} {{- end }} diff --git a/charts/opentelemetry-collector/templates/statefulset.yaml b/charts/opentelemetry-collector/templates/statefulset.yaml index bca3e1b00..29f23fdd0 100644 --- a/charts/opentelemetry-collector/templates/statefulset.yaml +++ b/charts/opentelemetry-collector/templates/statefulset.yaml @@ -27,7 +27,7 @@ spec: template: metadata: annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap-statefulset.yaml") . | sha256sum }} {{- include "opentelemetry-collector.podAnnotations" . | nindent 8 }} labels: {{- include "opentelemetry-collector.selectorLabels" . | nindent 8 }} @@ -38,7 +38,7 @@ spec: dnsPolicy: {{ . }} {{- end }} {{- with .Values.dnsConfig }} - dnsConfig: + dnsConfig: {{- toYaml . | nindent 8 }} {{- end }} {{- $podValues := deepCopy .Values }} diff --git a/charts/opentelemetry-collector/values.schema.json b/charts/opentelemetry-collector/values.schema.json index 62d31825d..9451da6f2 100644 --- a/charts/opentelemetry-collector/values.schema.json +++ b/charts/opentelemetry-collector/values.schema.json @@ -474,6 +474,12 @@ "type": "object" } }, + "extraEnvsFrom": { + "type": "array", + "items": { + "type": "object" + } + }, "extraConfigMapMounts": { "type": "array", "items": { @@ -796,6 +802,10 @@ "internalTrafficPolicy": { "type": "string", "enum": ["Cluster", "Local", ""] + }, + "externalTrafficPolicy": { + "type": "string", + "enum": ["Cluster", "Local", ""] } } }, diff --git a/charts/opentelemetry-collector/values.yaml b/charts/opentelemetry-collector/values.yaml index 05e22453b..3cca3754e 100644 --- a/charts/opentelemetry-collector/values.yaml +++ b/charts/opentelemetry-collector/values.yaml @@ -298,6 +298,7 @@ topologySpreadConstraints: [] priorityClassName: "" extraEnvs: [] +extraEnvsFrom: [] extraVolumes: [] extraVolumeMounts: [] @@ -354,7 +355,7 @@ ports: resources: {} # resources: # limits: -# cpu: 256m +# cpu: 250m # memory: 512Mi podAnnotations: {} @@ -471,6 +472,12 @@ service: # type: LoadBalancer # loadBalancerIP: 1.2.3.4 # loadBalancerSourceRanges: [] + + # By default, Service of type 'LoadBalancer' will be created setting 'externalTrafficPolicy: Cluster' + # unless another value is explicitly set.
+ # Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip) + # externalTrafficPolicy: Cluster + annotations: {} # By default, Service will be created setting 'internalTrafficPolicy: Local' on mode = daemonset diff --git a/charts/opentelemetry-demo/Chart.lock b/charts/opentelemetry-demo/Chart.lock index e842413c0..48c6a112f 100644 --- a/charts/opentelemetry-demo/Chart.lock +++ b/charts/opentelemetry-demo/Chart.lock @@ -1,15 +1,15 @@ dependencies: - name: opentelemetry-collector repository: https://open-telemetry.github.io/opentelemetry-helm-charts - version: 0.55.1 + version: 0.65.1 - name: jaeger repository: https://jaegertracing.github.io/helm-charts - version: 0.69.1 + version: 0.71.11 - name: prometheus repository: https://prometheus-community.github.io/helm-charts - version: 20.2.0 + version: 23.3.0 - name: grafana repository: https://grafana.github.io/helm-charts - version: 6.52.8 -digest: sha256:ea20e40b7172424213cf43690bee2880ea4e2ae6863651bc94e1a02e16bfc3ef -generated: "2023-05-01T21:40:06.422733-04:00" + version: 6.58.8 +digest: sha256:07407bf09ca623b169850105796321e569cf9f0934b667ab44dd8a3e611c9534 +generated: "2023-08-10T22:36:06.487161-04:00" diff --git a/charts/opentelemetry-demo/Chart.yaml b/charts/opentelemetry-demo/Chart.yaml index 5e3ec972b..ec859120e 100644 --- a/charts/opentelemetry-demo/Chart.yaml +++ b/charts/opentelemetry-demo/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 type: application name: opentelemetry-demo -version: 0.22.3 +version: 0.23.0 description: opentelemetry demo helm chart home: https://opentelemetry.io/ sources: @@ -14,18 +14,18 @@ icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png appVersion: "1.4.0" dependencies: - name: opentelemetry-collector - version: 0.55.1 + version: 0.65.1 repository: https://open-telemetry.github.io/opentelemetry-helm-charts condition: opentelemetry-collector.enabled - name: jaeger - version: 0.69.1 + version: 0.71.11 repository: https://jaegertracing.github.io/helm-charts condition: jaeger.enabled - name: prometheus - version: 20.2.0 + version: 23.3.0 repository: https://prometheus-community.github.io/helm-charts condition: prometheus.enabled - name: grafana - version: 6.52.8 + version: 6.58.8 repository: https://grafana.github.io/helm-charts condition: grafana.enabled diff --git a/charts/opentelemetry-demo/UPGRADING.md b/charts/opentelemetry-demo/UPGRADING.md index c04e6ba4f..2b51e54b8 100644 --- a/charts/opentelemetry-demo/UPGRADING.md +++ b/charts/opentelemetry-demo/UPGRADING.md @@ -1,5 +1,11 @@ # Upgrade guidelines +## To 0.23 + +The Prometheus sub-chart dependency made updates to pod labels. You may need to +use the `--force` option with your Helm upgrade command, or delete the release +and re-install it. 
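As a rough illustration of that upgrade path (the release name `my-otel-demo` and the `open-telemetry` repository alias below are placeholders, not part of this change), a forced upgrade could look like:

```
# Add or refresh the chart repository (URL as listed in Chart.lock)
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
helm repo update

# --force replaces resources whose immutable fields (such as selector labels) changed;
# deleting and re-installing the release is the alternative
helm upgrade my-otel-demo open-telemetry/opentelemetry-demo --force
```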
+ ## To 0.22 This release moves to using the `connectors` functionality in the OpenTelemetry diff --git a/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/component.yaml b/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/component.yaml index d3185fa9a..57f6575f0 100644 --- a/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/component.yaml +++ b/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/component.yaml @@ -5,7 +5,7 @@ kind: Service metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -30,7 +30,7 @@ kind: Service metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -55,7 +55,7 @@ kind: Service metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -80,7 +80,7 @@ kind: Service metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -105,7 +105,7 @@ kind: Service metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -130,7 +130,7 @@ kind: Service metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -158,7 +158,7 @@ kind: Service metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -183,7 +183,7 @@ kind: Service metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -208,7 +208,7 @@ kind: Service metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -233,7 +233,7 @@ kind: Service metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -261,7 +261,7 @@ kind: Service metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -286,7 +286,7 @@ kind: Service metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -311,7 +311,7 @@ kind: Service metadata: name: example-productcatalogservice labels: - helm.sh/chart: 
opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -336,7 +336,7 @@ kind: Service metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -361,7 +361,7 @@ kind: Service metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -386,7 +386,7 @@ kind: Service metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -411,7 +411,7 @@ kind: Service metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example @@ -436,7 +436,7 @@ kind: Deployment metadata: name: example-accountingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-accountingservice app.kubernetes.io/instance: example @@ -518,7 +518,7 @@ kind: Deployment metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -600,7 +600,7 @@ kind: Deployment metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -690,7 +690,7 @@ kind: Deployment metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -790,7 +790,7 @@ kind: Deployment metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -868,7 +868,7 @@ kind: Deployment metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -948,7 +948,7 @@ kind: Deployment metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -1048,7 +1048,7 @@ kind: Deployment metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -1132,7 +1132,7 @@ kind: Deployment metadata: name: example-frauddetectionservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frauddetectionservice app.kubernetes.io/instance: example @@ -1214,7 +1214,7 @@ kind: Deployment metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + 
helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -1316,7 +1316,7 @@ kind: Deployment metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -1420,7 +1420,7 @@ kind: Deployment metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -1506,7 +1506,7 @@ kind: Deployment metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -1596,7 +1596,7 @@ kind: Deployment metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -1678,7 +1678,7 @@ kind: Deployment metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -1758,7 +1758,7 @@ kind: Deployment metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -1842,7 +1842,7 @@ kind: Deployment metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -1928,7 +1928,7 @@ kind: Deployment metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -2006,7 +2006,7 @@ kind: Deployment metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/serviceaccount.yaml b/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/serviceaccount.yaml index 8ffa88bbd..386fdd068 100644 --- a/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/bring-your-own-observability/rendered/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: name: example labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/component.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/component.yaml index 38ce03738..bee0c9cf8 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/component.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/component.yaml @@ -5,7 +5,7 @@ kind: Service metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + 
helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -30,7 +30,7 @@ kind: Service metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -55,7 +55,7 @@ kind: Service metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -80,7 +80,7 @@ kind: Service metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -105,7 +105,7 @@ kind: Service metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -130,7 +130,7 @@ kind: Service metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -158,7 +158,7 @@ kind: Service metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -183,7 +183,7 @@ kind: Service metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -208,7 +208,7 @@ kind: Service metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -233,7 +233,7 @@ kind: Service metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -261,7 +261,7 @@ kind: Service metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -286,7 +286,7 @@ kind: Service metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -311,7 +311,7 @@ kind: Service metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -336,7 +336,7 @@ kind: Service metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -361,7 +361,7 @@ kind: Service metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice 
app.kubernetes.io/instance: example @@ -386,7 +386,7 @@ kind: Service metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -411,7 +411,7 @@ kind: Service metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example @@ -436,7 +436,7 @@ kind: Deployment metadata: name: example-accountingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-accountingservice app.kubernetes.io/instance: example @@ -518,7 +518,7 @@ kind: Deployment metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -600,7 +600,7 @@ kind: Deployment metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -690,7 +690,7 @@ kind: Deployment metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -790,7 +790,7 @@ kind: Deployment metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -868,7 +868,7 @@ kind: Deployment metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -948,7 +948,7 @@ kind: Deployment metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -1048,7 +1048,7 @@ kind: Deployment metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -1132,7 +1132,7 @@ kind: Deployment metadata: name: example-frauddetectionservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frauddetectionservice app.kubernetes.io/instance: example @@ -1214,7 +1214,7 @@ kind: Deployment metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -1316,7 +1316,7 @@ kind: Deployment metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -1420,7 +1420,7 @@ kind: Deployment metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -1506,7 +1506,7 @@ kind: 
Deployment metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -1596,7 +1596,7 @@ kind: Deployment metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -1678,7 +1678,7 @@ kind: Deployment metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -1758,7 +1758,7 @@ kind: Deployment metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -1842,7 +1842,7 @@ kind: Deployment metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -1928,7 +1928,7 @@ kind: Deployment metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -2006,7 +2006,7 @@ kind: Deployment metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana-dashboards.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana-dashboards.yaml index d5bac47d8..57a061ba5 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana-dashboards.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana-dashboards.yaml @@ -5,7 +5,7 @@ kind: ConfigMap metadata: name: example-grafana-dashboards labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrole.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrole.yaml index 37dd3e814..41ad3ca81 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrole.yaml @@ -4,10 +4,10 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-clusterrole rules: [] diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrolebinding.yaml index 1bb386013..b29ae8821 100644 --- 
a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/clusterrolebinding.yaml @@ -5,10 +5,10 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: example-grafana-clusterrolebinding labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/configmap.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/configmap.yaml index d85c20ab6..1be423a56 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/configmap.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/configmap.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: grafana.ini: | diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/deployment.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/deployment.yaml index 703abeec6..b65d0eb83 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/deployment.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/deployment.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -26,10 +26,11 @@ spec: app.kubernetes.io/name: grafana app.kubernetes.io/instance: example annotations: - checksum/config: 0bd174d19e5d455ad71aea624e65ae51fd412a6b3c54fba8db25d85a897c0f1b + checksum/config: faa523acbb15ce366dd2353d5417b71479ace592c9694e315997add86f6592bc checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b - checksum/secret: 159754aaf8e3f63d0512321d2f4ec32957e240bc75210d5e24303e6d266666b4 + checksum/secret: a5b2dd94ad3d3c38ad2f31d52e0b0588f2df6263a230726d9f6673b8b23105da + kubectl.kubernetes.io/default-container: grafana spec: serviceAccountName: example-grafana @@ -37,12 +38,20 @@ spec: securityContext: fsGroup: 472 runAsGroup: 472 + runAsNonRoot: true runAsUser: 472 enableServiceLinks: true containers: - name: grafana - image: "grafana/grafana:9.4.7" + image: "docker.io/grafana/grafana:10.0.3" imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault volumeMounts: - name: config mountPath: "/etc/grafana/grafana.ini" diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/role.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/role.yaml index ffe05e274..4bb6ab1b9 100644 --- 
a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/role.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/role.yaml @@ -6,9 +6,9 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm rules: [] diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/rolebinding.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/rolebinding.yaml index 2ff6b3d95..3fb842746 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/rolebinding.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/rolebinding.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/secret.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/secret.yaml index 8ca5ebfc6..5945da59c 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/secret.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/secret.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm type: Opaque data: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/service.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/service.yaml index 20e30b774..397a3d56c 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/service.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/service.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/serviceaccount.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/serviceaccount.yaml index 2d4090b96..7bdd349a1 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm 
name: example-grafana namespace: default diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-configmap.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-configmap.yaml index 783067d38..22511fc1f 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-configmap.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-configmap.yaml @@ -9,10 +9,10 @@ metadata: "helm.sh/hook": test-success "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: run.sh: |- diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-serviceaccount.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-serviceaccount.yaml index 5b43f7194..9cf86f6c9 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test-serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-test namespace: default diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test.yaml index 27a0e8224..ae03b9581 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/grafana/tests/test.yaml @@ -5,10 +5,10 @@ kind: Pod metadata: name: example-grafana-test labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test-success @@ -18,7 +18,7 @@ spec: serviceAccountName: example-grafana-test containers: - name: example-test - image: "bats/bats:v1.4.1" + image: "docker.io/bats/bats:v1.4.1" imagePullPolicy: "IfNotPresent" command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] volumeMounts: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-agent-svc.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-agent-svc.yaml index 13c4b0a18..19d9ec47c 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-agent-svc.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-agent-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-agent labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-agent spec: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-collector-svc.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-collector-svc.yaml index 8786c0e7b..1232c2f82 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-collector-svc.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-collector-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-collector labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-collector spec: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-deploy.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-deploy.yaml index 4518b9fe4..d492c2124 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-deploy.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-deploy.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one prometheus.io/port: "14269" @@ -44,7 +44,7 @@ spec: value: "false" - name: COLLECTOR_OTLP_ENABLED value: "true" - image: jaegertracing/all-in-one:1.42.0 + image: jaegertracing/all-in-one:1.45.0 imagePullPolicy: IfNotPresent name: jaeger args: @@ -96,4 +96,6 @@ spec: resources: limits: memory: 300Mi + volumeMounts: serviceAccountName: example-jaeger + volumes: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-query-svc.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-query-svc.yaml index da5bade96..032bee30f 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-query-svc.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-query-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-query labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-query spec: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-sa.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-sa.yaml index a5ae7bb57..fa14f6868 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-sa.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/jaeger/allinone-sa.yaml @@ -5,9 +5,9 @@ kind: ServiceAccount metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - 
app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/configmap-agent.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/configmap-agent.yaml index 345f01cff..a6e36ebb5 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/configmap-agent.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/configmap-agent.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-otelcol-agent labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/daemonset.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/daemonset.yaml index 96f550d9c..6f60463bd 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/daemonset.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/daemonset.yaml @@ -5,10 +5,10 @@ kind: DaemonSet metadata: name: example-otelcol-agent labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm spec: selector: @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - checksum/config: 627ef43facddeaa6c6e7f91d8b1e04c1c7f292cbacad373de913a5a337dfc669 + checksum/config: e62fcdf5b3a33ec3cdb0ad02e110fcaa5f3ec9f29067a643b7413b0bd7623171 opentelemetry_community_demo: "true" prometheus.io/port: "9464" prometheus.io/scrape: "true" @@ -42,7 +42,7 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.76.1" + image: "otel/opentelemetry-collector-contrib:0.82.0" imagePullPolicy: IfNotPresent ports: - name: jaeger-compact diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/serviceaccount.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/serviceaccount.yaml index 2e262e6fb..5601241bf 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/opentelemetry-collector/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrole.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrole.yaml index f33254581..dc9341909 100644 --- 
a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrole.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server rules: - apiGroups: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrolebinding.yaml index 114cb7979..5b48ce6a0 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/clusterrolebinding.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/cm.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/cm.yaml index 149a9bebf..a1d733410 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/cm.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/cm.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default data: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/deploy.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/deploy.yaml index b5578f03b..cbf307609 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/deploy.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/deploy.yaml @@ -4,38 +4,43 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: selector: matchLabels: - 
component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example replicas: 1 + revisionHistoryLimit: 10 strategy: type: Recreate rollingUpdate: null template: metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus spec: enableServiceLinks: true serviceAccountName: example-prometheus-server containers: - name: prometheus-server - image: "quay.io/prometheus/prometheus:v2.43.0" + image: "quay.io/prometheus/prometheus:v2.46.0" imagePullPolicy: "IfNotPresent" args: - --storage.tsdb.retention.time=15d diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/service.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/service.yaml index 8e4cc196d..e184e9f0d 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/service.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/service.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: Service metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: @@ -18,8 +20,8 @@ spec: protocol: TCP targetPort: 9090 selector: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example sessionAffinity: None type: "ClusterIP" diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/serviceaccount.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/serviceaccount.yaml index c9d51410f..1ebd2db1e 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/prometheus/serviceaccount.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default annotations: diff --git a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/serviceaccount.yaml b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/serviceaccount.yaml index 8ffa88bbd..386fdd068 100644 --- a/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/collector-as-daemonset/rendered/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: name: example labels: 
- helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/component.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/component.yaml index 0165a7d39..f36cbc8f8 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/component.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/component.yaml @@ -5,7 +5,7 @@ kind: Service metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -30,7 +30,7 @@ kind: Service metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -55,7 +55,7 @@ kind: Service metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -80,7 +80,7 @@ kind: Service metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -105,7 +105,7 @@ kind: Service metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -130,7 +130,7 @@ kind: Service metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -158,7 +158,7 @@ kind: Service metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -183,7 +183,7 @@ kind: Service metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -208,7 +208,7 @@ kind: Service metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -233,7 +233,7 @@ kind: Service metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -261,7 +261,7 @@ kind: Service metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -286,7 +286,7 @@ kind: Service metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -311,7 +311,7 @@ kind: Service metadata: name: 
example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -336,7 +336,7 @@ kind: Service metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -361,7 +361,7 @@ kind: Service metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -386,7 +386,7 @@ kind: Service metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -411,7 +411,7 @@ kind: Service metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example @@ -436,7 +436,7 @@ kind: Deployment metadata: name: example-accountingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-accountingservice app.kubernetes.io/instance: example @@ -520,7 +520,7 @@ kind: Deployment metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -604,7 +604,7 @@ kind: Deployment metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -696,7 +696,7 @@ kind: Deployment metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -798,7 +798,7 @@ kind: Deployment metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -878,7 +878,7 @@ kind: Deployment metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -960,7 +960,7 @@ kind: Deployment metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -1062,7 +1062,7 @@ kind: Deployment metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -1146,7 +1146,7 @@ kind: Deployment metadata: name: example-frauddetectionservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frauddetectionservice app.kubernetes.io/instance: example @@ -1230,7 +1230,7 @@ kind: Deployment metadata: name: example-frontend 
labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -1334,7 +1334,7 @@ kind: Deployment metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -1438,7 +1438,7 @@ kind: Deployment metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -1524,7 +1524,7 @@ kind: Deployment metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -1616,7 +1616,7 @@ kind: Deployment metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -1700,7 +1700,7 @@ kind: Deployment metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -1782,7 +1782,7 @@ kind: Deployment metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -1868,7 +1868,7 @@ kind: Deployment metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -1956,7 +1956,7 @@ kind: Deployment metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -2034,7 +2034,7 @@ kind: Deployment metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana-dashboards.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana-dashboards.yaml index d5bac47d8..57a061ba5 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana-dashboards.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana-dashboards.yaml @@ -5,7 +5,7 @@ kind: ConfigMap metadata: name: example-grafana-dashboards labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrole.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrole.yaml index 37dd3e814..41ad3ca81 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrole.yaml +++ 
b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrole.yaml @@ -4,10 +4,10 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-clusterrole rules: [] diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrolebinding.yaml index 1bb386013..b29ae8821 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/clusterrolebinding.yaml @@ -5,10 +5,10 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: example-grafana-clusterrolebinding labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/configmap.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/configmap.yaml index d85c20ab6..1be423a56 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/configmap.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/configmap.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: grafana.ini: | diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/deployment.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/deployment.yaml index 703abeec6..b65d0eb83 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/deployment.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/deployment.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -26,10 +26,11 @@ spec: app.kubernetes.io/name: grafana app.kubernetes.io/instance: example annotations: - checksum/config: 0bd174d19e5d455ad71aea624e65ae51fd412a6b3c54fba8db25d85a897c0f1b + checksum/config: faa523acbb15ce366dd2353d5417b71479ace592c9694e315997add86f6592bc checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b - checksum/secret: 159754aaf8e3f63d0512321d2f4ec32957e240bc75210d5e24303e6d266666b4 + checksum/secret: 
a5b2dd94ad3d3c38ad2f31d52e0b0588f2df6263a230726d9f6673b8b23105da + kubectl.kubernetes.io/default-container: grafana spec: serviceAccountName: example-grafana @@ -37,12 +38,20 @@ spec: securityContext: fsGroup: 472 runAsGroup: 472 + runAsNonRoot: true runAsUser: 472 enableServiceLinks: true containers: - name: grafana - image: "grafana/grafana:9.4.7" + image: "docker.io/grafana/grafana:10.0.3" imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault volumeMounts: - name: config mountPath: "/etc/grafana/grafana.ini" diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/role.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/role.yaml index ffe05e274..4bb6ab1b9 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/role.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/role.yaml @@ -6,9 +6,9 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm rules: [] diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/rolebinding.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/rolebinding.yaml index 2ff6b3d95..3fb842746 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/rolebinding.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/rolebinding.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/secret.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/secret.yaml index 8ca5ebfc6..5945da59c 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/secret.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/secret.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm type: Opaque data: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/service.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/service.yaml index 20e30b774..397a3d56c 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/service.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/service.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 
app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/serviceaccount.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/serviceaccount.yaml index 2d4090b96..7bdd349a1 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana namespace: default diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-configmap.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-configmap.yaml index 783067d38..22511fc1f 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-configmap.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-configmap.yaml @@ -9,10 +9,10 @@ metadata: "helm.sh/hook": test-success "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: run.sh: |- diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-serviceaccount.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-serviceaccount.yaml index 5b43f7194..9cf86f6c9 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test-serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-test namespace: default diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test.yaml index 27a0e8224..ae03b9581 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/grafana/tests/test.yaml @@ -5,10 +5,10 @@ kind: Pod metadata: name: example-grafana-test labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm annotations: 
"helm.sh/hook": test-success @@ -18,7 +18,7 @@ spec: serviceAccountName: example-grafana-test containers: - name: example-test - image: "bats/bats:v1.4.1" + image: "docker.io/bats/bats:v1.4.1" imagePullPolicy: "IfNotPresent" command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] volumeMounts: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-agent-svc.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-agent-svc.yaml index 13c4b0a18..19d9ec47c 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-agent-svc.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-agent-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-agent labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-agent spec: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-collector-svc.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-collector-svc.yaml index 8786c0e7b..1232c2f82 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-collector-svc.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-collector-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-collector labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-collector spec: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-deploy.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-deploy.yaml index 4518b9fe4..d492c2124 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-deploy.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-deploy.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one prometheus.io/port: "14269" @@ -44,7 +44,7 @@ spec: value: "false" - name: COLLECTOR_OTLP_ENABLED value: "true" - image: jaegertracing/all-in-one:1.42.0 + image: jaegertracing/all-in-one:1.45.0 imagePullPolicy: IfNotPresent name: jaeger args: @@ -96,4 +96,6 @@ spec: resources: limits: memory: 300Mi + volumeMounts: serviceAccountName: example-jaeger + volumes: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-query-svc.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-query-svc.yaml index da5bade96..032bee30f 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-query-svc.yaml 
+++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-query-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-query labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-query spec: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-sa.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-sa.yaml index a5ae7bb57..fa14f6868 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-sa.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/jaeger/allinone-sa.yaml @@ -5,9 +5,9 @@ kind: ServiceAccount metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/configmap.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/configmap.yaml index 65cc605f7..f201a599a 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/configmap.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/deployment.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/deployment.yaml index ca63bd8de..6480aa00d 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/deployment.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: a3a9263b6d6111b94da158ff4c9d9fd67c6cb3aed729b134ea907554519dfa6e + checksum/config: 20851ed0c4bdafbefc52a90b489ba1a5a377b40e2add9425e2e4de2dcfce0f2d opentelemetry_community_demo: "true" prometheus.io/port: "9464" prometheus.io/scrape: "true" @@ -44,7 +44,7 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.76.1" + image: "otel/opentelemetry-collector-contrib:0.82.0" imagePullPolicy: 
IfNotPresent ports: - name: jaeger-compact diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/service.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/service.yaml index 38af05c71..d431f6ea5 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/service.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/service.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: @@ -52,3 +52,4 @@ spec: app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/serviceaccount.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/serviceaccount.yaml index 2e262e6fb..5601241bf 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/opentelemetry-collector/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrole.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrole.yaml index f33254581..dc9341909 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrole.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server rules: - apiGroups: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrolebinding.yaml index 114cb7979..5b48ce6a0 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/clusterrolebinding.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - component: 
"server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/cm.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/cm.yaml index 149a9bebf..a1d733410 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/cm.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/cm.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default data: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/deploy.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/deploy.yaml index b5578f03b..cbf307609 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/deploy.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/deploy.yaml @@ -4,38 +4,43 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: selector: matchLabels: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example replicas: 1 + revisionHistoryLimit: 10 strategy: type: Recreate rollingUpdate: null template: metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus spec: enableServiceLinks: true serviceAccountName: example-prometheus-server containers: - name: prometheus-server - image: "quay.io/prometheus/prometheus:v2.43.0" + image: "quay.io/prometheus/prometheus:v2.46.0" imagePullPolicy: "IfNotPresent" args: - --storage.tsdb.retention.time=15d diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/service.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/service.yaml index 8e4cc196d..e184e9f0d 100644 --- 
a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/service.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/service.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: Service metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: @@ -18,8 +20,8 @@ spec: protocol: TCP targetPort: 9090 selector: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example sessionAffinity: None type: "ClusterIP" diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/serviceaccount.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/serviceaccount.yaml index c9d51410f..1ebd2db1e 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/prometheus/serviceaccount.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default annotations: diff --git a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/serviceaccount.yaml b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/serviceaccount.yaml index 8ffa88bbd..386fdd068 100644 --- a/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/custom-environment-variables/rendered/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: name: example labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/default/rendered/component.yaml b/charts/opentelemetry-demo/examples/default/rendered/component.yaml index cc3491511..15b691cb0 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/component.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/component.yaml @@ -5,7 +5,7 @@ kind: Service metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -30,7 +30,7 @@ kind: Service metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -55,7 +55,7 @@ kind: Service metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: 
opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -80,7 +80,7 @@ kind: Service metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -105,7 +105,7 @@ kind: Service metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -130,7 +130,7 @@ kind: Service metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -158,7 +158,7 @@ kind: Service metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -183,7 +183,7 @@ kind: Service metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -208,7 +208,7 @@ kind: Service metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -233,7 +233,7 @@ kind: Service metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -261,7 +261,7 @@ kind: Service metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -286,7 +286,7 @@ kind: Service metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -311,7 +311,7 @@ kind: Service metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -336,7 +336,7 @@ kind: Service metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -361,7 +361,7 @@ kind: Service metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -386,7 +386,7 @@ kind: Service metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -411,7 +411,7 @@ kind: Service metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice 
app.kubernetes.io/instance: example @@ -436,7 +436,7 @@ kind: Deployment metadata: name: example-accountingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-accountingservice app.kubernetes.io/instance: example @@ -518,7 +518,7 @@ kind: Deployment metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -600,7 +600,7 @@ kind: Deployment metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -690,7 +690,7 @@ kind: Deployment metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -790,7 +790,7 @@ kind: Deployment metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -868,7 +868,7 @@ kind: Deployment metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -948,7 +948,7 @@ kind: Deployment metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -1048,7 +1048,7 @@ kind: Deployment metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -1132,7 +1132,7 @@ kind: Deployment metadata: name: example-frauddetectionservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frauddetectionservice app.kubernetes.io/instance: example @@ -1214,7 +1214,7 @@ kind: Deployment metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -1316,7 +1316,7 @@ kind: Deployment metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -1420,7 +1420,7 @@ kind: Deployment metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -1506,7 +1506,7 @@ kind: Deployment metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -1596,7 +1596,7 @@ kind: Deployment metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ 
-1678,7 +1678,7 @@ kind: Deployment metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -1758,7 +1758,7 @@ kind: Deployment metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -1842,7 +1842,7 @@ kind: Deployment metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -1928,7 +1928,7 @@ kind: Deployment metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -2006,7 +2006,7 @@ kind: Deployment metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana-dashboards.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana-dashboards.yaml index d5bac47d8..57a061ba5 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana-dashboards.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana-dashboards.yaml @@ -5,7 +5,7 @@ kind: ConfigMap metadata: name: example-grafana-dashboards labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrole.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrole.yaml index 37dd3e814..41ad3ca81 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrole.yaml @@ -4,10 +4,10 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-clusterrole rules: [] diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrolebinding.yaml index 1bb386013..b29ae8821 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/clusterrolebinding.yaml @@ -5,10 +5,10 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: example-grafana-clusterrolebinding labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/configmap.yaml 
b/charts/opentelemetry-demo/examples/default/rendered/grafana/configmap.yaml index d85c20ab6..1be423a56 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/configmap.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/configmap.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: grafana.ini: | diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/deployment.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/deployment.yaml index 703abeec6..b65d0eb83 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/deployment.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/deployment.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -26,10 +26,11 @@ spec: app.kubernetes.io/name: grafana app.kubernetes.io/instance: example annotations: - checksum/config: 0bd174d19e5d455ad71aea624e65ae51fd412a6b3c54fba8db25d85a897c0f1b + checksum/config: faa523acbb15ce366dd2353d5417b71479ace592c9694e315997add86f6592bc checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b - checksum/secret: 159754aaf8e3f63d0512321d2f4ec32957e240bc75210d5e24303e6d266666b4 + checksum/secret: a5b2dd94ad3d3c38ad2f31d52e0b0588f2df6263a230726d9f6673b8b23105da + kubectl.kubernetes.io/default-container: grafana spec: serviceAccountName: example-grafana @@ -37,12 +38,20 @@ spec: securityContext: fsGroup: 472 runAsGroup: 472 + runAsNonRoot: true runAsUser: 472 enableServiceLinks: true containers: - name: grafana - image: "grafana/grafana:9.4.7" + image: "docker.io/grafana/grafana:10.0.3" imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault volumeMounts: - name: config mountPath: "/etc/grafana/grafana.ini" diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/role.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/role.yaml index ffe05e274..4bb6ab1b9 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/role.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/role.yaml @@ -6,9 +6,9 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm rules: [] diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/rolebinding.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/rolebinding.yaml index 2ff6b3d95..3fb842746 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/rolebinding.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/rolebinding.yaml @@ 
-6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/secret.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/secret.yaml index 8ca5ebfc6..5945da59c 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/secret.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/secret.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm type: Opaque data: diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/service.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/service.yaml index 20e30b774..397a3d56c 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/service.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/service.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/serviceaccount.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/serviceaccount.yaml index 2d4090b96..7bdd349a1 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana namespace: default diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-configmap.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-configmap.yaml index 783067d38..22511fc1f 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-configmap.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-configmap.yaml @@ -9,10 +9,10 @@ metadata: "helm.sh/hook": test-success "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: run.sh: |- diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-serviceaccount.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-serviceaccount.yaml index 5b43f7194..9cf86f6c9 100644 --- 
a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test-serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-test namespace: default diff --git a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test.yaml b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test.yaml index 27a0e8224..ae03b9581 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/grafana/tests/test.yaml @@ -5,10 +5,10 @@ kind: Pod metadata: name: example-grafana-test labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test-success @@ -18,7 +18,7 @@ spec: serviceAccountName: example-grafana-test containers: - name: example-test - image: "bats/bats:v1.4.1" + image: "docker.io/bats/bats:v1.4.1" imagePullPolicy: "IfNotPresent" command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] volumeMounts: diff --git a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-agent-svc.yaml b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-agent-svc.yaml index 13c4b0a18..19d9ec47c 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-agent-svc.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-agent-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-agent labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-agent spec: diff --git a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-collector-svc.yaml b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-collector-svc.yaml index 8786c0e7b..1232c2f82 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-collector-svc.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-collector-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-collector labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-collector spec: diff --git a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-deploy.yaml b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-deploy.yaml index 4518b9fe4..d492c2124 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-deploy.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-deploy.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-jaeger labels: - helm.sh/chart: 
jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one prometheus.io/port: "14269" @@ -44,7 +44,7 @@ spec: value: "false" - name: COLLECTOR_OTLP_ENABLED value: "true" - image: jaegertracing/all-in-one:1.42.0 + image: jaegertracing/all-in-one:1.45.0 imagePullPolicy: IfNotPresent name: jaeger args: @@ -96,4 +96,6 @@ spec: resources: limits: memory: 300Mi + volumeMounts: serviceAccountName: example-jaeger + volumes: diff --git a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-query-svc.yaml b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-query-svc.yaml index da5bade96..032bee30f 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-query-svc.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-query-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-query labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-query spec: diff --git a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-sa.yaml b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-sa.yaml index a5ae7bb57..fa14f6868 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-sa.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/jaeger/allinone-sa.yaml @@ -5,9 +5,9 @@ kind: ServiceAccount metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one diff --git a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/configmap.yaml b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/configmap.yaml index cb58ac717..1e65a6edd 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/configmap.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/deployment.yaml b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/deployment.yaml index a8cc31e28..13c7ec436 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/deployment.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol 
app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: 70766a04d09e3d8f05813f244cafb07d295308fbae0aa3750f667c41619a50c1 + checksum/config: 642250a36237ea6c5d9137695bbc2874323b1d1849096d6e61789e892007fbf3 opentelemetry_community_demo: "true" prometheus.io/port: "9464" prometheus.io/scrape: "true" @@ -44,7 +44,7 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.76.1" + image: "otel/opentelemetry-collector-contrib:0.82.0" imagePullPolicy: IfNotPresent ports: - name: jaeger-compact diff --git a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/service.yaml b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/service.yaml index 38af05c71..d431f6ea5 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/service.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/service.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: @@ -52,3 +52,4 @@ spec: app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/serviceaccount.yaml b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/serviceaccount.yaml index 2e262e6fb..5601241bf 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/opentelemetry-collector/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrole.yaml b/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrole.yaml index f33254581..dc9341909 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrole.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server rules: - apiGroups: diff --git a/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrolebinding.yaml 
b/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrolebinding.yaml index 114cb7979..5b48ce6a0 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/clusterrolebinding.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/default/rendered/prometheus/cm.yaml b/charts/opentelemetry-demo/examples/default/rendered/prometheus/cm.yaml index 149a9bebf..a1d733410 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/cm.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/cm.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default data: diff --git a/charts/opentelemetry-demo/examples/default/rendered/prometheus/deploy.yaml b/charts/opentelemetry-demo/examples/default/rendered/prometheus/deploy.yaml index b5578f03b..cbf307609 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/deploy.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/deploy.yaml @@ -4,38 +4,43 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: selector: matchLabels: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example replicas: 1 + revisionHistoryLimit: 10 strategy: type: Recreate rollingUpdate: null template: metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus spec: enableServiceLinks: true serviceAccountName: example-prometheus-server containers: - name: prometheus-server - image: "quay.io/prometheus/prometheus:v2.43.0" + image: "quay.io/prometheus/prometheus:v2.46.0" imagePullPolicy: "IfNotPresent" args: - --storage.tsdb.retention.time=15d diff --git 
a/charts/opentelemetry-demo/examples/default/rendered/prometheus/service.yaml b/charts/opentelemetry-demo/examples/default/rendered/prometheus/service.yaml index 8e4cc196d..e184e9f0d 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/service.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/service.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: Service metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: @@ -18,8 +20,8 @@ spec: protocol: TCP targetPort: 9090 selector: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example sessionAffinity: None type: "ClusterIP" diff --git a/charts/opentelemetry-demo/examples/default/rendered/prometheus/serviceaccount.yaml b/charts/opentelemetry-demo/examples/default/rendered/prometheus/serviceaccount.yaml index c9d51410f..1ebd2db1e 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/prometheus/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/prometheus/serviceaccount.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default annotations: diff --git a/charts/opentelemetry-demo/examples/default/rendered/serviceaccount.yaml b/charts/opentelemetry-demo/examples/default/rendered/serviceaccount.yaml index 8ffa88bbd..386fdd068 100644 --- a/charts/opentelemetry-demo/examples/default/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/default/rendered/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: name: example labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/component.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/component.yaml index cfe77332c..2611a3fbf 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/component.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/component.yaml @@ -5,7 +5,7 @@ kind: Service metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -30,7 +30,7 @@ kind: Service metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -55,7 +55,7 @@ kind: Service metadata: name: example-checkoutservice labels: - helm.sh/chart: 
opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -80,7 +80,7 @@ kind: Service metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -105,7 +105,7 @@ kind: Service metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -130,7 +130,7 @@ kind: Service metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -158,7 +158,7 @@ kind: Service metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -183,7 +183,7 @@ kind: Service metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -208,7 +208,7 @@ kind: Service metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -233,7 +233,7 @@ kind: Service metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -261,7 +261,7 @@ kind: Service metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -286,7 +286,7 @@ kind: Service metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice app.kubernetes.io/instance: example @@ -311,7 +311,7 @@ kind: Service metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -336,7 +336,7 @@ kind: Service metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -361,7 +361,7 @@ kind: Service metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -386,7 +386,7 @@ kind: Service metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -411,7 +411,7 @@ kind: Service metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: 
example-shippingservice app.kubernetes.io/instance: example @@ -436,7 +436,7 @@ kind: Deployment metadata: name: example-accountingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-accountingservice app.kubernetes.io/instance: example @@ -518,7 +518,7 @@ kind: Deployment metadata: name: example-adservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-adservice app.kubernetes.io/instance: example @@ -600,7 +600,7 @@ kind: Deployment metadata: name: example-cartservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-cartservice app.kubernetes.io/instance: example @@ -690,7 +690,7 @@ kind: Deployment metadata: name: example-checkoutservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-checkoutservice app.kubernetes.io/instance: example @@ -790,7 +790,7 @@ kind: Deployment metadata: name: example-currencyservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-currencyservice app.kubernetes.io/instance: example @@ -868,7 +868,7 @@ kind: Deployment metadata: name: example-emailservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-emailservice app.kubernetes.io/instance: example @@ -948,7 +948,7 @@ kind: Deployment metadata: name: example-featureflagservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-featureflagservice app.kubernetes.io/instance: example @@ -1048,7 +1048,7 @@ kind: Deployment metadata: name: example-ffspostgres labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-ffspostgres app.kubernetes.io/instance: example @@ -1132,7 +1132,7 @@ kind: Deployment metadata: name: example-frauddetectionservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frauddetectionservice app.kubernetes.io/instance: example @@ -1214,7 +1214,7 @@ kind: Deployment metadata: name: example-frontend labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontend app.kubernetes.io/instance: example @@ -1316,7 +1316,7 @@ kind: Deployment metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example @@ -1420,7 +1420,7 @@ kind: Deployment metadata: name: example-kafka labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-kafka app.kubernetes.io/instance: example @@ -1506,7 +1506,7 @@ kind: Deployment metadata: name: example-loadgenerator labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-loadgenerator app.kubernetes.io/instance: example @@ -1596,7 +1596,7 @@ kind: Deployment metadata: name: example-paymentservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-paymentservice 
app.kubernetes.io/instance: example @@ -1678,7 +1678,7 @@ kind: Deployment metadata: name: example-productcatalogservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-productcatalogservice app.kubernetes.io/instance: example @@ -1758,7 +1758,7 @@ kind: Deployment metadata: name: example-quoteservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-quoteservice app.kubernetes.io/instance: example @@ -1842,7 +1842,7 @@ kind: Deployment metadata: name: example-recommendationservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-recommendationservice app.kubernetes.io/instance: example @@ -1928,7 +1928,7 @@ kind: Deployment metadata: name: example-redis labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-redis app.kubernetes.io/instance: example @@ -2006,7 +2006,7 @@ kind: Deployment metadata: name: example-shippingservice labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-shippingservice app.kubernetes.io/instance: example @@ -2086,7 +2086,7 @@ kind: Ingress metadata: name: example-frontendproxy labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example-frontendproxy app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana-dashboards.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana-dashboards.yaml index d5bac47d8..57a061ba5 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana-dashboards.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana-dashboards.yaml @@ -5,7 +5,7 @@ kind: ConfigMap metadata: name: example-grafana-dashboards labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrole.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrole.yaml index 37dd3e814..41ad3ca81 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrole.yaml @@ -4,10 +4,10 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-clusterrole rules: [] diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrolebinding.yaml index 1bb386013..b29ae8821 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/clusterrolebinding.yaml @@ -5,10 +5,10 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: 
example-grafana-clusterrolebinding labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/configmap.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/configmap.yaml index d85c20ab6..1be423a56 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/configmap.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/configmap.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: grafana.ini: | diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/deployment.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/deployment.yaml index 703abeec6..b65d0eb83 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/deployment.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/deployment.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -26,10 +26,11 @@ spec: app.kubernetes.io/name: grafana app.kubernetes.io/instance: example annotations: - checksum/config: 0bd174d19e5d455ad71aea624e65ae51fd412a6b3c54fba8db25d85a897c0f1b + checksum/config: faa523acbb15ce366dd2353d5417b71479ace592c9694e315997add86f6592bc checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b - checksum/secret: 159754aaf8e3f63d0512321d2f4ec32957e240bc75210d5e24303e6d266666b4 + checksum/secret: a5b2dd94ad3d3c38ad2f31d52e0b0588f2df6263a230726d9f6673b8b23105da + kubectl.kubernetes.io/default-container: grafana spec: serviceAccountName: example-grafana @@ -37,12 +38,20 @@ spec: securityContext: fsGroup: 472 runAsGroup: 472 + runAsNonRoot: true runAsUser: 472 enableServiceLinks: true containers: - name: grafana - image: "grafana/grafana:9.4.7" + image: "docker.io/grafana/grafana:10.0.3" imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault volumeMounts: - name: config mountPath: "/etc/grafana/grafana.ini" diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/role.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/role.yaml index ffe05e274..4bb6ab1b9 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/role.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/role.yaml @@ -6,9 +6,9 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: 
grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm rules: [] diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/rolebinding.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/rolebinding.yaml index 2ff6b3d95..3fb842746 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/rolebinding.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/rolebinding.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/secret.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/secret.yaml index 8ca5ebfc6..5945da59c 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/secret.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/secret.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm type: Opaque data: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/service.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/service.yaml index 20e30b774..397a3d56c 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/service.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/service.yaml @@ -6,10 +6,10 @@ metadata: name: example-grafana namespace: default labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/serviceaccount.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/serviceaccount.yaml index 2d4090b96..7bdd349a1 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana namespace: default diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-configmap.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-configmap.yaml index 783067d38..22511fc1f 100644 --- 
a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-configmap.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-configmap.yaml @@ -9,10 +9,10 @@ metadata: "helm.sh/hook": test-success "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm data: run.sh: |- diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-serviceaccount.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-serviceaccount.yaml index 5b43f7194..9cf86f6c9 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test-serviceaccount.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm name: example-grafana-test namespace: default diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test.yaml index 27a0e8224..ae03b9581 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/grafana/tests/test.yaml @@ -5,10 +5,10 @@ kind: Pod metadata: name: example-grafana-test labels: - helm.sh/chart: grafana-6.52.8 + helm.sh/chart: grafana-6.58.8 app.kubernetes.io/name: grafana app.kubernetes.io/instance: example - app.kubernetes.io/version: "9.4.7" + app.kubernetes.io/version: "10.0.3" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test-success @@ -18,7 +18,7 @@ spec: serviceAccountName: example-grafana-test containers: - name: example-test - image: "bats/bats:v1.4.1" + image: "docker.io/bats/bats:v1.4.1" imagePullPolicy: "IfNotPresent" command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] volumeMounts: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-agent-svc.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-agent-svc.yaml index 13c4b0a18..19d9ec47c 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-agent-svc.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-agent-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-agent labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-agent spec: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-collector-svc.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-collector-svc.yaml index 8786c0e7b..1232c2f82 100644 --- 
a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-collector-svc.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-collector-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-collector labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-collector spec: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-deploy.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-deploy.yaml index 4518b9fe4..d492c2124 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-deploy.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-deploy.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one prometheus.io/port: "14269" @@ -44,7 +44,7 @@ spec: value: "false" - name: COLLECTOR_OTLP_ENABLED value: "true" - image: jaegertracing/all-in-one:1.42.0 + image: jaegertracing/all-in-one:1.45.0 imagePullPolicy: IfNotPresent name: jaeger args: @@ -96,4 +96,6 @@ spec: resources: limits: memory: 300Mi + volumeMounts: serviceAccountName: example-jaeger + volumes: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-query-svc.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-query-svc.yaml index da5bade96..032bee30f 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-query-svc.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-query-svc.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-jaeger-query labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: service-query spec: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-sa.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-sa.yaml index a5ae7bb57..fa14f6868 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-sa.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/jaeger/allinone-sa.yaml @@ -5,9 +5,9 @@ kind: ServiceAccount metadata: name: example-jaeger labels: - helm.sh/chart: jaeger-0.69.1 + helm.sh/chart: jaeger-0.71.11 app.kubernetes.io/name: jaeger app.kubernetes.io/instance: example - app.kubernetes.io/version: "1.42.0" + app.kubernetes.io/version: "1.45.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: all-in-one diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/configmap.yaml 
b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/configmap.yaml index cb58ac717..1e65a6edd 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/configmap.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/configmap.yaml @@ -5,10 +5,10 @@ kind: ConfigMap metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm data: relay: | diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/deployment.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/deployment.yaml index a8cc31e28..13c7ec436 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/deployment.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -23,7 +23,7 @@ spec: template: metadata: annotations: - checksum/config: 70766a04d09e3d8f05813f244cafb07d295308fbae0aa3750f667c41619a50c1 + checksum/config: 642250a36237ea6c5d9137695bbc2874323b1d1849096d6e61789e892007fbf3 opentelemetry_community_demo: "true" prometheus.io/port: "9464" prometheus.io/scrape: "true" @@ -44,7 +44,7 @@ spec: - --config=/conf/relay.yaml securityContext: {} - image: "otel/opentelemetry-collector-contrib:0.76.1" + image: "otel/opentelemetry-collector-contrib:0.82.0" imagePullPolicy: IfNotPresent ports: - name: jaeger-compact diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/ingress.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/ingress.yaml index 8ebb61918..2bc7209e9 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/ingress.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/ingress.yaml @@ -5,10 +5,10 @@ kind: Ingress metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/service.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/service.yaml index 38af05c71..d431f6ea5 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/service.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/service.yaml @@ -5,10 +5,10 @@ kind: Service metadata: name: example-otelcol labels: - 
helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm component: standalone-collector spec: @@ -52,3 +52,4 @@ spec: app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/serviceaccount.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/serviceaccount.yaml index 2e262e6fb..5601241bf 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/opentelemetry-collector/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: example-otelcol labels: - helm.sh/chart: opentelemetry-collector-0.55.1 + helm.sh/chart: opentelemetry-collector-0.65.1 app.kubernetes.io/name: otelcol app.kubernetes.io/instance: example - app.kubernetes.io/version: "0.76.1" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrole.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrole.yaml index f33254581..dc9341909 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrole.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrole.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server rules: - apiGroups: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrolebinding.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrolebinding.yaml index 114cb7979..5b48ce6a0 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrolebinding.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/clusterrolebinding.yaml @@ -4,11 +4,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server subjects: - kind: ServiceAccount diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/cm.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/cm.yaml index 149a9bebf..a1d733410 100644 --- 
a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/cm.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/cm.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ConfigMap metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default data: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/deploy.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/deploy.yaml index b5578f03b..cbf307609 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/deploy.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/deploy.yaml @@ -4,38 +4,43 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: selector: matchLabels: - component: "server" - app: prometheus - release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example replicas: 1 + revisionHistoryLimit: 10 strategy: type: Recreate rollingUpdate: null template: metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus spec: enableServiceLinks: true serviceAccountName: example-prometheus-server containers: - name: prometheus-server - image: "quay.io/prometheus/prometheus:v2.43.0" + image: "quay.io/prometheus/prometheus:v2.46.0" imagePullPolicy: "IfNotPresent" args: - --storage.tsdb.retention.time=15d diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/service.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/service.yaml index 8e4cc196d..e184e9f0d 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/service.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/service.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: Service metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default spec: @@ -18,8 +20,8 @@ spec: protocol: TCP targetPort: 9090 selector: - component: "server" - app: prometheus - 
release: example + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example sessionAffinity: None type: "ClusterIP" diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/serviceaccount.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/serviceaccount.yaml index c9d51410f..1ebd2db1e 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/prometheus/serviceaccount.yaml @@ -4,11 +4,13 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - component: "server" - app: prometheus - release: example - chart: prometheus-20.2.0 - heritage: Helm + app.kubernetes.io/component: server + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: example + app.kubernetes.io/version: v2.46.0 + helm.sh/chart: prometheus-23.3.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: prometheus name: example-prometheus-server namespace: default annotations: diff --git a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/serviceaccount.yaml b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/serviceaccount.yaml index 8ffa88bbd..386fdd068 100644 --- a/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-demo/examples/public-hosted-ingress/rendered/serviceaccount.yaml @@ -5,7 +5,7 @@ kind: ServiceAccount metadata: name: example labels: - helm.sh/chart: opentelemetry-demo-0.22.3 + helm.sh/chart: opentelemetry-demo-0.23.0 opentelemetry.io/name: example app.kubernetes.io/instance: example diff --git a/charts/opentelemetry-operator/Chart.yaml b/charts/opentelemetry-operator/Chart.yaml index ef15cc83a..0e8cac2db 100644 --- a/charts/opentelemetry-operator/Chart.yaml +++ b/charts/opentelemetry-operator/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: opentelemetry-operator -version: 0.32.0 +version: 0.35.1 description: OpenTelemetry Operator Helm chart for Kubernetes type: application home: https://opentelemetry.io/ @@ -11,4 +11,4 @@ maintainers: - name: dmitryax - name: TylerHelmuth icon: https://raw.githubusercontent.com/cncf/artwork/a718fa97fffec1b9fd14147682e9e3ac0c8817cb/projects/opentelemetry/icon/color/opentelemetry-icon-color.png -appVersion: 0.79.0 +appVersion: 0.82.0 diff --git a/charts/opentelemetry-operator/README.md b/charts/opentelemetry-operator/README.md index d70a09c71..8905acce6 100644 --- a/charts/opentelemetry-operator/README.md +++ b/charts/opentelemetry-operator/README.md @@ -18,10 +18,10 @@ certificate that the API server is configured to trust. There are a few differen In this way, cert-manager will generate a self-signed certificate. _See [cert-manager installation](https://cert-manager.io/docs/installation/kubernetes/) for more details._ - You can provide your own Issuer by configuring the `admissionWebhooks.certManager.issuerRef` value. You will need to specify the `kind` (Issuer or ClusterIssuer) and the `name`. Note that this method also requires the installation of cert-manager. - - You can use an automatically generated self-signed certificate by setting `admissionWebhooks.certManager.enabled` to `false` and `admissionWebhooks.autoGenerateCert` to `true`. Helm will create a self-signd cert and a secret for you. 
+ - You can use an automatically generated self-signed certificate by setting `admissionWebhooks.certManager.enabled` to `false` and `admissionWebhooks.autoGenerateCert` to `true`. Helm will create a self-signed cert and a secret for you. - You can use your own generated self-signed certificate by setting both `admissionWebhooks.certManager.enabled` and `admissionWebhooks.autoGenerateCert` to `false`. You should provide the necessary values to `admissionWebhooks.cert_file`, `admissionWebhooks.key_file`, and `admissionWebhooks.ca_file`. - You can sideload custom webhooks and certificate by disabling `.Values.admissionWebhooks.create` and `admissionWebhooks.certManager.enabled` while setting your custom cert secret name in `admissionWebhooks.secretName` - - You can disable webhooks alltogether by disabling `.Values.admissionWebhooks.create` and setting env var to `ENABLE_WEBHOOKS: "false"` + - You can disable webhooks altogether by disabling `.Values.admissionWebhooks.create` and setting env var to `ENABLE_WEBHOOKS: "false"` ## Add Repository @@ -46,6 +46,13 @@ $ helm install --namespace opentelemetry-operator-system \ opentelemetry-operator open-telemetry/opentelemetry-operator ``` +If you wish for Helm to create an automatically generated self-signed certificate, make sure to set the appropriate values when installing the chart: + +```console +$ helm install --set admissionWebhooks.certManager.enabled=false --set admissionWebhooks.autoGenerateCert=true \ + opentelemetry-operator open-telemetry/opentelemetry-operator +``` + _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ ## Uninstall Chart @@ -106,7 +113,7 @@ to an early version if anything unexpected happens, pause the Collector, etc. In instance just as an application. The following example configuration deploys the Collector as Deployment resource. The receiver is Jaeger receiver and -the exporter is logging exporter. +the exporter is [logging exporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/loggingexporter). ```console $ kubectl apply -f - <'']`, + `metadata.annotations[''<KEY>'']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.'
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object required: - - port + - name type: object - tcpSocket: - description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler - and kept for the backward compatibility. There are no validation - of this field and lifecycle hooks will fail in runtime when - tcp handler is specified. + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event such - as liveness/startup probe failure, preemption, resource contention, - etc. The handler is not called if the container crashes or exits. - The Pod''s termination grace period countdown begins before - the PreStop hook is executed. Regardless of the outcome of the - handler, the container will eventually terminate within the - Pod''s termination grace period (unless delayed by finalizers). 
- Other management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. properties: - name: - description: The header field name. This will be - canonicalized upon output, so case-variant names - will be understood as the same header. + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. type: string - value: - description: The header field value + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. type: string required: - - name - - value + - port type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. 
+ type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. 
If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. 
Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. 
Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + lifecycle: + description: Actions that the management system should take in response + to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name. This will be + canonicalized upon output, so case-variant names + will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler + and kept for the backward compatibility. There are no validation + of this field and lifecycle hooks will fail in runtime when + tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event such + as liveness/startup probe failure, preemption, resource contention, + etc. The handler is not called if the container crashes or exits. + The Pod''s termination grace period countdown begins before + the PreStop hook is executed. Regardless of the outcome of the + handler, the container will eventually terminate within the + Pod''s termination grace period (unless delayed by finalizers). + Other management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name. This will be + canonicalized upon output, so case-variant names + will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port @@ -1644,6 +2922,14 @@ spec: format: int32 type: integer type: object + managementState: + default: managed + description: ManagementState defines if the CR should be managed by + the operator or not. Default is managed. + enum: + - managed + - unmanaged + type: string maxReplicas: description: 'MaxReplicas sets an upper bound to the autoscaling feature. If MaxReplicas is set autoscaling is enabled. Deprecated: use "OpenTelemetryCollector.Spec.Autoscaler.MaxReplicas" @@ -1672,6 +2958,19 @@ spec: This is only relevant to daemonset, statefulset, and deployment mode type: object + observability: + description: ObservabilitySpec defines how telemetry data gets handled. 
+ properties: + metrics: + description: Metrics defines the metrics configuration for operands. + properties: + enableMetrics: + description: EnableMetrics specifies if ServiceMonitors should + be created for the OpenTelemetry Collector. The operator.observability.prometheus + feature gate must be enabled to use this feature. + type: boolean + type: object + type: object podAnnotations: additionalProperties: type: string @@ -2150,6 +3449,122 @@ spec: description: Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not. type: boolean + env: + description: ENV vars to set on the OpenTelemetry TargetAllocator's + Pods. These can then in certain cases be consumed in the config + file for the TargetAllocator. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels[''<KEY>'']`, + `metadata.annotations[''<KEY>'']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.'
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array filterStrategy: description: FilterStrategy determines how to filter targets before allocating them among the collectors. The only current option @@ -2160,6 +3575,12 @@ spec: description: Image indicates the container image to use for the OpenTelemetry TargetAllocator. type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to schedule OpenTelemetry TargetAllocator + pods. + type: object prometheusCR: description: PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 @@ -2179,6 +3600,12 @@ spec: the map is going to exactly match a label in a PodMonitor's meta labels. The requirements are ANDed. type: object + scrapeInterval: + default: 30s + description: "Interval between consecutive scrapes. Equivalent + to the same setting on the Prometheus CRD. \n Default: \"30s\"" + format: duration + type: string serviceMonitorSelector: additionalProperties: type: string @@ -2251,6 +3678,188 @@ spec: service account to use with this instance. When set, the operator will not automatically create a ServiceAccount for the TargetAllocator. type: string + topologySpreadConstraints: + description: TopologySpreadConstraints embedded kubernetes pod + configuration option, controls how pods are spread across your + cluster among failure-domains such as regions, zones, nodes, + and other user-defined topology domains https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. 
Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming pod + labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading + will be calculated for the incoming pod. The same key + is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't + set. Keys that don't exist in the incoming pod labels + will be ignored. A null or empty list means only match + against labelSelector. \n This is a beta field and requires + the MatchLabelKeysInPodTopologySpread feature gate to + be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. 
\n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is a beta field and requires + the MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching + nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. \n If this value is + nil, the behavior is equivalent to the Honor policy. This + is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat + node taints when calculating pod topology spread skew. + Options are: - Honor: nodes without taints, along with + tainted nodes for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. All nodes + are included. \n If this value is nil, the behavior is + equivalent to the Ignore policy. This is a beta-level + feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible domain + as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.'
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object terminationGracePeriodSeconds: description: Duration in seconds the pod needs to terminate gracefully @@ -2299,6 +3908,182 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: TopologySpreadConstraints embedded kubernetes pod configuration + option, controls how pods are spread across your cluster among failure-domains + such as regions, zones, nodes, and other user-defined topology domains + https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + This is only relevant to statefulset, and deployment mode + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select + the pods over which spreading will be calculated. The keys + are used to lookup values from the incoming pod labels, those + key-value labels are ANDed with labelSelector to select the + group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in + both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot + be set when LabelSelector isn't set. Keys that don't exist + in the incoming pod labels will be ignored. A null or empty + list means only match against labelSelector. \n This is a + beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. 
+ The global minimum is the minimum number of matching pods + in an eligible domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. | + zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 to become + 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation of + Skew is performed. And when the number of eligible domains + with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. If + value is nil, the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. When value + is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For + example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains + is set to 5 and pods with the same labelSelector spread as + 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), so \"global + minimum\" is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any of the + three zones, it will violate MaxSkew. \n This is a beta field + and requires the MinDomainsInPodTopologySpread feature gate + to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. \n + If this value is nil, the behavior is equivalent to the Honor + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node + taints when calculating pod topology spread skew. Options + are: - Honor: nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + \n If this value is nil, the behavior is equivalent to the + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. 
We consider each + as a "bucket", and try to put balanced number of pods into + each bucket. We define a domain as a particular instance of + a topology. Also, we define an eligible domain as a domain + whose nodes meet the requirements of nodeAffinityPolicy and + nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain of + that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for an + incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array upgradeStrategy: description: UpgradeStrategy represents how the operator will handle upgrades to the CR when a newer version of the operator is deployed diff --git a/charts/opentelemetry-operator/crds/crd-opentelemetryinstrumentation.yaml b/charts/opentelemetry-operator/crds/crd-opentelemetryinstrumentation.yaml index b84079a40..8765ee7fa 100644 --- a/charts/opentelemetry-operator/crds/crd-opentelemetryinstrumentation.yaml +++ b/charts/opentelemetry-operator/crds/crd-opentelemetryinstrumentation.yaml @@ -52,7 +52,7 @@ spec: SDK and instrumentation. properties: apacheHttpd: - description: Apache defines configuration for Apache HTTPD auto-instrumentation. + description: ApacheHttpd defines configuration for Apache HTTPD auto-instrumentation. properties: attrs: description: 'Attrs defines Apache HTTPD agent specific attributes. @@ -998,6 +998,300 @@ spec: type: object type: object type: object + nginx: + description: Nginx defines configuration for Nginx auto-instrumentation. + properties: + attrs: + description: 'Attrs defines Nginx agent specific attributes. The + precedence order is: `agent default attributes` > `instrument + spec attributes` . Attributes are documented at https://github.com/open-telemetry/opentelemetry-cpp-contrib/tree/main/instrumentation/otel-webserver-module' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + configFile: + description: Location of Nginx configuration file. Needed only + if different from default "/etx/nginx/nginx.conf" + type: string + env: + description: 'Env defines Nginx specific env vars. There are four + layers for env vars'' definitions and the precedence order is: + `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: Image is a container image with Nginx SDK and auto-instrumentation. + type: string + resourceRequirements: + description: Resources describes the compute resource requirements. 
+ properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object nodejs: description: NodeJS defines configuration for nodejs auto-instrumentation. 
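# Editor's note: a hedged sketch of the new `nginx` block added to the Instrumentation CRD
# above. Everything below is illustrative: the image reference, the OTLP endpoint, the
# module attribute name and the resource numbers are assumptions, not chart defaults.
apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: example-instrumentation          # hypothetical name
spec:
  nginx:
    image: my-registry/nginx-autoinstrumentation:latest   # hypothetical image containing the Nginx SDK
    configFile: /etc/nginx/nginx.conf    # only needed when the config lives somewhere other than the default
    env:
      # Precedence (per the description above): original container env vars >
      # language-specific env vars > common env vars > instrumentation spec vars.
      - name: OTEL_EXPORTER_OTLP_ENDPOINT
        value: http://example-collector:4317               # hypothetical collector endpoint
      - name: POD_NAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name                       # downward-API style value, as documented above
    attrs:
      - name: NginxModuleServiceName                       # assumed otel-webserver-module attribute
        value: my-nginx-service
    resourceRequirements:
      requests:
        cpu: 100m
        memory: 64Mi
      limits:
        cpu: 500m
        memory: 128Mi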
properties: diff --git a/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml b/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml index 1925fdfda..de811d5ae 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml @@ -6,9 +6,9 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook @@ -85,9 +85,9 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml b/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml index fd6235ef7..a3aadb32f 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml @@ -4,9 +4,9 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook @@ -29,9 +29,9 @@ apiVersion: cert-manager.io/v1 kind: Issuer metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml b/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml index 47dfa3a28..2620efe5c 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -127,6 +127,18 @@ rules: - get - list - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - 
get + - list + - patch + - update + - watch - apiGroups: - networking.k8s.io resources: @@ -179,6 +191,7 @@ rules: - route.openshift.io resources: - routes + - routes/custom-host verbs: - create - delete @@ -201,9 +214,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -219,9 +232,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml b/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml index 1588321c1..f7e4fd175 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -25,9 +25,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml b/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml index 61fa3e2a1..40dd61915 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml @@ -4,9 +4,9 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -33,13 +33,13 @@ spec: - --enable-leader-election - --health-probe-addr=:8081 - --webhook-port=9443 - - --collector-image=otel/opentelemetry-collector-contrib:0.79.0 + - --collector-image=otel/opentelemetry-collector-contrib:0.82.0 command: - /manager env: - name: ENABLE_WEBHOOKS value: "true" - image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.79.0" + image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.82.0" 
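# Editor's note: the new monitoring.coreos.com/servicemonitors rule in the ClusterRole above
# is what allows the operator to manage ServiceMonitor objects for collector instances.
# A hedged sketch of the collector option commonly paired with it (assumes the Prometheus
# Operator CRDs are installed; enableMetrics is an assumed field on the collector spec,
# and the resource name is hypothetical):
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: example-collector          # hypothetical name
spec:
  observability:
    metrics:
      enableMetrics: true          # asks the operator to create a ServiceMonitor for this collector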
name: manager ports: - containerPort: 8080 diff --git a/charts/opentelemetry-operator/examples/default/rendered/role.yaml b/charts/opentelemetry-operator/examples/default/rendered/role.yaml index c99bd35ef..5bda91bac 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/role.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/role.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml b/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml index 2b1e09610..ffe1f896c 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/service.yaml b/charts/opentelemetry-operator/examples/default/rendered/service.yaml index 5dbdfadc3..795f4bab5 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/service.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/service.yaml @@ -4,9 +4,9 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -31,9 +31,9 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml b/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml index ed51d75e5..17949502f 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml @@ -6,9 +6,9 @@ metadata: name: opentelemetry-operator namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git 
a/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml b/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml index 9e24759ff..21d0de8cf 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml @@ -6,9 +6,9 @@ metadata: name: "example-opentelemetry-operator-cert-manager" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml b/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml index 61e0291c8..ce1d37af3 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml @@ -6,9 +6,9 @@ metadata: name: "example-opentelemetry-operator-metrics" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -43,9 +43,9 @@ metadata: name: "example-opentelemetry-operator-webhook" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.32.0 + helm.sh/chart: opentelemetry-operator-0.35.1 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.79.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/templates/clusterrole.yaml b/charts/opentelemetry-operator/templates/clusterrole.yaml index 4bbfb21f2..5dab078cb 100644 --- a/charts/opentelemetry-operator/templates/clusterrole.yaml +++ b/charts/opentelemetry-operator/templates/clusterrole.yaml @@ -122,6 +122,18 @@ rules: - get - list - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - networking.k8s.io resources: @@ -174,6 +186,7 @@ rules: - route.openshift.io resources: - routes + - routes/custom-host verbs: - create - delete diff --git a/charts/opentelemetry-operator/values.schema.json b/charts/opentelemetry-operator/values.schema.json index 937812ee8..d5649f910 100644 --- a/charts/opentelemetry-operator/values.schema.json +++ b/charts/opentelemetry-operator/values.schema.json @@ -139,13 +139,13 @@ "default": "", "title": "The tag Schema", "examples": [ - "v0.77.0" + "v0.81.0" ] } }, "examples": [{ "repository": "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator", - "tag": "v0.77.0" + "tag": "v0.81.0" }] }, "collectorImage": { @@ -879,7 +879,7 @@ "examples": [{ "image": { "repository": "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator", - "tag": "v0.77.0" + "tag": "v0.81.0" }, "collectorImage": { "repository": 
"otel/opentelemetry-collector-contrib", @@ -1568,7 +1568,7 @@ "manager": { "image": { "repository": "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator", - "tag": "v0.77.0" + "tag": "v0.81.0" }, "collectorImage": { "repository": "otel/opentelemetry-collector-contrib", diff --git a/charts/opentelemetry-operator/values.yaml b/charts/opentelemetry-operator/values.yaml index 3a03fcb3c..1d17a2742 100644 --- a/charts/opentelemetry-operator/values.yaml +++ b/charts/opentelemetry-operator/values.yaml @@ -29,10 +29,10 @@ pdb: manager: image: repository: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - tag: v0.79.0 + tag: v0.82.0 collectorImage: repository: otel/opentelemetry-collector-contrib - tag: 0.79.0 + tag: 0.82.0 targetAllocatorImage: repository: "" tag: ""