diff --git a/Makefile b/Makefile
index 028c7d6b0e..2ab168365b 100644
--- a/Makefile
+++ b/Makefile
@@ -188,7 +188,7 @@ retina-capture-workload: ## build the Retina capture workload
 ##@ Containers
 
 IMAGE_REGISTRY ?= ghcr.io
-IMAGE_NAMESPACE ?= $(shell git config --get remote.origin.url | sed -E 's/.*github\.com[\/:]([^\/]+)\/([^\/.]+).git/\1\/\2/')
+IMAGE_NAMESPACE ?= $(shell git config --get remote.origin.url | sed -E 's/.*github\.com[\/:]([^\/]+)\/([^\/.]+).git/\1\/\2/' | tr '[:upper:]' '[:lower:]')
 
 RETINA_BUILDER_IMAGE = $(IMAGE_NAMESPACE)/retina-builder
 RETINA_TOOLS_IMAGE = $(IMAGE_NAMESPACE)/retina-tools
@@ -395,7 +395,7 @@ coverage: # Code coverage.
 manifests:
 	cd crd && make manifests && make generate
 
-HELM_IMAGE_TAG ?= v0.0.1-pre.1
+HELM_IMAGE_TAG ?= v0.0.1
 
 # basic/node-level mode
 helm-install: manifests
@@ -447,8 +447,7 @@ helm-install-advanced-local-context: manifests
 		--skip-crds \
 		--set enabledPlugin_linux="\[dropreason\,packetforward\,linuxutil\,dns\,packetparser\]" \
 		--set enablePodLevel=true \
-		--set enableAnnotations=true \
-		--set bypassLookupIPOfInterest=false
+		--set enableAnnotations=true
 
 helm-uninstall:
 	helm uninstall retina -n kube-system
@@ -461,50 +460,3 @@ docs:
 .PHONY: docs-pod
 docs-prod:
 	docker run -i -p 3000:3000 -v $(PWD):/retina -w /retina/ node:20-alpine npm install --prefix site && npm run build --prefix site
-
-# Kapinger is a tool to generate traffic for testing Retina.
-
-kapinger-image: ## build the retina container image.
-	echo "Building for $(PLATFORM)"
-	$(MAKE) container-$(CONTAINER_BUILDER) \
-		PLATFORM=$(PLATFORM) \
-		DOCKERFILE=hack/tools/kapinger/Dockerfile \
-		REGISTRY=$(IMAGE_REGISTRY) \
-		IMAGE=$(KAPINGER_IMAGE) \
-		VERSION=$(TAG) \
-		TAG=$(RETINA_PLATFORM_TAG) \
-		APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \
-		CONTEXT_DIR=$(REPO_ROOT) \
-		ACTION=--load
-
-kapinger-image-push:
-	$(MAKE) container-push \
-		IMAGE=$(KAPINGER_IMAGE) \
-		TAG=$(RETINA_PLATFORM_TAG)
-
-kapinger-manifest-create:
-	$(MAKE) manifest-create \
-		PLATFORMS="$(PLATFORMS)" \
-		IMAGE=$(KAPINGER_IMAGE) \
-		TAG=$(TAG)
-
-kapinger-manifest-push:
-	$(MAKE) manifest-push \
-		IMAGE=$(KAPINGER_IMAGE) \
-		TAG=$(TAG)
-
-kapinger-image-win-push:
-	$(MAKE) container-$(CONTAINER_BUILDER) \
-		PLATFORM=windows/amd64 \
-		DOCKERFILE=hack/tools/kapinger/Dockerfile.windows \
-		REGISTRY=$(IMAGE_REGISTRY) \
-		IMAGE=$(KAPINGER_IMAGE) \
-		VERSION=$(TAG) \
-		TAG=$(RETINA_PLATFORM_TAG) \
-		CONTEXT_DIR=$(REPO_ROOT) \
-		ACTION=--push
-
-kapinger-skopeo-archive:
-	$(MAKE) manifest-skopeo-archive \
-		IMAGE=$(KAPINGER_IMAGE) \
-		TAG=$(TAG)
diff --git a/README.md b/README.md
index 30f1fdf8f1..c52766035a 100644
--- a/README.md
+++ b/README.md
@@ -89,6 +89,30 @@ Prerequisites: Go, Helm
 
 2. Follow steps in [Capture CRD](https://retina.sh/docs/captures/#option-2-capture-crd-custom-resource-definition) for documentation of the CRD and examples for setting up Captures.
 
+#### CLI Setup
+
+Currently, Retina CLI only supports Linux.
+
+For CLI usage, see [Capture with Retina CLI](../captures/cli.md).
+
+#### Option 1: Download from Release
+
+Download `kubectl-retina` from the latest [Retina release](https://github.com/microsoft/retina/releases).
+Feel free to move the binary to `/usr/local/bin/`, or otherwise add it to your `PATH`.
+
+#### Option 2: Build from source
+
+Clone the Retina repo and execute:
+
+```shell
+make install-kubectl-retina
+```
+
+Requirements:
+
+- go 1.21 or newer
+- GNU make
+
 ## Contributing
 
 This project welcomes contributions and suggestions. Most contributions require you to agree to a
diff --git a/docs/contributing/readme.md b/docs/contributing/readme.md
index bbf8b4d0e6..0eb5be4ca5 100644
--- a/docs/contributing/readme.md
+++ b/docs/contributing/readme.md
@@ -54,7 +54,9 @@ export LLVM_VERSION=14
 curl -sL https://apt.llvm.org/llvm.sh | sudo bash -s "$LLVM_VERSION"
 ```
 
-Download [Helm](https://helm.sh/) as well.
+- Download [Helm](https://helm.sh/)
+- Fork the repository
+- If you want to use [ghcr.io](https://github.com/features/packages) as your container registry, log in following the instructions [here](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-with-a-personal-access-token-classic)
 
 ### Test
 
@@ -80,7 +82,8 @@ make retina
 To build a `retina-agent` container image with specific tag:
 
 ```bash
-TAG= make retina-image
+make retina-image # also pushes to the image registry
+make retina-operator-image
 ```
 
 To build binary of a plugin and test it
 
@@ -101,19 +104,10 @@ debug packetforward Received PacketForward data {"Data": "IngressBytes:8 ...
 ```
 
-### Deploying Locally on Kind
-
-```bash
-make kind-setup # This deploys a Kind cluster and installs azure NPM
-make retina-image && make kind-install # Skip building image if already done
-make kind-clean # Delete Kind cluster
-```
-
-### Deploying on Other Kubernetes Cluster
+### Deploying on a Kubernetes Cluster
 
 1. Create Kubernetes cluster.
-2. Build and push the docker image for Retina: `make retina-image-push IMAGE_REGISTRY=`
-3. Install Retina: `make helm-install IMAGE_REGISTRY=`
+2. Install Retina: `make helm-install`
 
 ### Verify Deployment
 
@@ -152,20 +146,13 @@ anubhab 614516 0.0 0.1 759000 41796 pts/3 Sl+ 14:34 0:00 kubectl port-
 $
 $ curl http://localhost:9090/metrics | grep retina
 ...
-# HELP retina_drop_bytes Total dropped bytes
-# TYPE retina_drop_bytes gauge
-retina_drop_bytes{direction="unknown",reason="IPTABLE_RULE_DROP"} 480
-# HELP retina_drop_count Total dropped packets
-# TYPE retina_drop_count gauge
-retina_drop_count{direction="unknown",reason="IPTABLE_RULE_DROP"} 12
-# HELP retina_forward_bytes Total forwarded bytes
-# TYPE retina_forward_bytes gauge
-retina_forward_bytes{direction="egress"} 1.28357355e+08
-retina_forward_bytes{direction="ingress"} 3.9520696e+08
-# HELP retina_forward_count Total forwarded packets
-# TYPE retina_forward_count gauge
-retina_forward_count{direction="egress"} 126462
-retina_forward_count{direction="ingress"} 156793
+networkobservability_drop_bytes{direction="unknown",reason="IPTABLE_RULE_DROP"} 480
+networkobservability_drop_count{direction="unknown",reason="IPTABLE_RULE_DROP"} 12
+networkobservability_forward_bytes{direction="egress"} 1.28357355e+08
+networkobservability_forward_bytes{direction="ingress"} 3.9520696e+08
+networkobservability_forward_count{direction="egress"} 126462
+networkobservability_forward_count{direction="ingress"} 156793
+...
 ```
 
 ### Dashboard/Prometheus/Grafana
@@ -182,7 +169,7 @@ Documentation for these technologies:
 Uninstall `Retina`:
 
 ```bash
-helm uninstall retina -n kube-system
+make helm-uninstall
 ```
 
 ## Contact
diff --git a/docs/installation/grafana.md b/docs/installation/grafana.md
index 85e000df7e..0ea730cea9 100644
--- a/docs/installation/grafana.md
+++ b/docs/installation/grafana.md
@@ -2,13 +2,12 @@
 
 ## Pre-Requisites
 
-Follow either:
-
 - [Unmanaged Prometheus/Grafana](./prometheus-unmanaged.md) or
-- [Azure-Hosted Prometheus/Grafana](prometheus-azure-managed.md).
 
 Make sure that you're still port-forwarding your server to localhost:9090, or configure your server for some other HTTP endpoint.
+Port-forward svc/prometheus-grafana to access Grafana from a local browser.
+
 
 ## Configuration
 
 1. Check Grafana to make sure the managed Prometheus datasource exists:
@@ -19,7 +18,7 @@ Make sure that you're still port-forwarding your server to localhost:9090, or co
 
    ![alt text](img/grafana-dashboard-import.png)
 
-3. Import the [published dashboards](https://grafana.com/grafana/dashboards/) by ID, or import the dashboards by JSON at *deploy/grafana/dashboards/*.
+3. Import the [published dashboards](https://grafana.com/grafana/dashboards/) by ID, e.g. [18814](https://grafana.com/grafana/dashboards/18814-kubernetes-networking-clusters/).
 
 4. The Grafana dashboard should now be visible.
 
@@ -27,7 +26,9 @@ Make sure that you're still port-forwarding your server to localhost:9090, or co
 
 ## Pre-Installed Dashboards
 
-If you're using [Azure-Hosted Prometheus/Grafana](prometheus-azure-managed.md), versions of these dashbaords are pre-installed under:
+ID: [18814](https://grafana.com/grafana/dashboards/18814-kubernetes-networking-clusters/)
+
+If you're using the above, versions of these dashboards are pre-installed under:
 
 - Dashboards > Managed Prometheus > Kubernetes / Networking / Clusters
 - Dashboards > Managed Prometheus > Kubernetes / Networking / DNS
diff --git a/docs/installation/prometheus-azure-managed.md b/docs/installation/prometheus-azure-managed.md
deleted file mode 100644
index ca5122c5a0..0000000000
--- a/docs/installation/prometheus-azure-managed.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Azure Managed Prometheus/Grafana
-
-## Pre-Requisites
-
-1. Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli).
-2. [Create an AKS cluster](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli#create-a-resource-group).
-3. Install Retina DaemonSet (see [Quick Installation](./setup.md)).
-
-## Deploying Prometheus and Grafana
-
-1. Create an Azure Monitor resource:
-
-   ```shell
-   az resource create \
-       --resource-group $RESOURCE_GROUP \
-       --namespace microsoft.monitor \
-       --resource-type accounts \
-       --name $AZURE_MONITOR_NAME \
-       --location $REGION \
-       --properties '{}'
-   ```
-
-2. Create a Grafana instance:
-
-   ```shell
-   az grafana create \
-       --name $GRAFANA_NAME \
-       --resource-group $RESOURCE_GROUP
-   ```
-
-3. Get the Azure Monitor and Grafana resource IDs
-
-   ```bash
-   export AZMON_RESOURCE_ID=$(az resource show --resource-group $RESOURCE_GROUP --name $AZURE_MONITOR_NAME --resource-type "Microsoft.Monitor/accounts" --query id -o tsv)
-   export GRAFANA_RESOURCE_ID=$(az resource show --resource-group $RESOURCE_GROUP --name $GRAFANA_NAME --resource-type "microsoft.dashboard/grafana" --query id -o tsv)
-   ```
-
-4. Link both the Azure Monitor Workspace and Grafana instance to your cluster:
-
-   ```shell
-   az aks update --enable-azure-monitor-metrics \
-       -n $NAME \
-       -g $RESOURCE_GROUP \
-       --azure-monitor-workspace-resource-id $AZMON_RESOURCE_ID \
-       --grafana-resource-id $GRAFANA_RESOURCE_ID
-   ```
-
-5. Verify that the Azure Monitor Pods are running. For example:
-
-   ```shell
-   kubectl get pod -n kube-system
-   ```
-
-   ```shell
-   NAME                               READY   STATUS    RESTARTS   AGE
-   ama-metrics-5bc6c6d948-zkgc9       2/2     Running   0          26h
-   ama-metrics-ksm-556d86b5dc-2ndkv   1/1     Running   0          26h
-   ama-metrics-node-lbwcj             2/2     Running   0          26h
-   ama-metrics-node-rzkzn             2/2     Running   0          26h
-   ama-metrics-win-node-gqnkw         2/2     Running   0          26h
-   ama-metrics-win-node-tkrm8         2/2     Running   0          26h
-   ```
-
-6. Verify that the Retina Pods are discovered by port-forwarding an AMA node Pod:
-
-   ```bash
-   kubectl port-forward -n kube-system $(kubectl get pod -n kube-system -l dsName=ama-metrics-node -o name | head -n 1) 9090:9090
-   ```
-
-   ```bash
-   Forwarding from 127.0.0.1:9090 -> 9090
-   Forwarding from [::1]:9090 -> 9090
-   ```
-
-7. Then go to [http://localhost:9090/targets](http://localhost:9090/targets) to see the Retina Pods being discovered and scraped:
-
-   ![alt text](img/prometheus-retina-pods.png)
-
-## Configuring Grafana
-
-In the Azure Portal, find your Grafana instance. Click on the Grafana Endpoint URL, then follow [Configuring Grafana](./grafana.md).
diff --git a/docs/installation/setup.md b/docs/installation/setup.md
index fe41a730f3..1ab4c73882 100644
--- a/docs/installation/setup.md
+++ b/docs/installation/setup.md
@@ -36,13 +36,4 @@ make helm-install-advanced-local-context
 
 ## Next Steps: Configuring Prometheus/Grafana
 
-Follow the guide relevant to your setup:
-
 - [Unmanaged Prometheus/Grafana](./prometheus-unmanaged.md)
-- [Azure-Hosted Prometheus/Grafana](./prometheus-azure-managed.md)
-
-## Managed Solutions
-
-For a managed experience, eliminating the need to manage helm charts, see these options:
-
-- [Azure-Managed Installation](https://learn.microsoft.com/en-us/azure/aks/network-observability-managed-cli?tabs=non-cilium)
diff --git a/docs/metrics/annotations.md b/docs/metrics/annotations.md
index 5d635eafc3..ea7b0d3891 100644
--- a/docs/metrics/annotations.md
+++ b/docs/metrics/annotations.md
@@ -3,7 +3,7 @@
 Annotations let you specify which Pods to observe (create metrics for).
 To configure this, specify `enableAnnotations=true` in Retina's [helm installation](../installation/setup.md) or [ConfigMap](../installation/config.md).
 
-You can then add the annotation `retina.sh/v1alpha1: observe` to either:
+You can then add the annotation `retina.sh: observe` to either:
 
 - individual Pods
 - Namespaces (to observe all the Pods in the namespace).
diff --git a/hack/tools/kapinger/Dockerfile b/hack/tools/kapinger/Dockerfile
deleted file mode 100644
index 5230c6d297..0000000000
--- a/hack/tools/kapinger/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder
-
-WORKDIR /app
-COPY ./hack/tools .
-RUN go mod download
-
-RUN CGO_ENABLED=0 GOOS=linux go build -o app ./kapinger
-
-FROM scratch
-WORKDIR /app
-COPY --from=builder /app/app .
-CMD ["./app"]
diff --git a/hack/tools/kapinger/Dockerfile.windows b/hack/tools/kapinger/Dockerfile.windows
deleted file mode 100644
index 692e687f19..0000000000
--- a/hack/tools/kapinger/Dockerfile.windows
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder
-
-WORKDIR /app
-COPY ./hack/tools .
-ENV GOOS=windows
-ENV GOARCH=amd64
-
-RUN echo "building kapinger for OS: $GOOS, ARCH: $GOARCH"
-RUN go mod download
-
-RUN CGO_ENABLED=0 go build -o app.exe ./kapinger
-
-FROM mcr.microsoft.com/windows/nanoserver:ltsc2022
-WORKDIR /app
-COPY --from=builder /app/app.exe .
-CMD ["app.exe"] diff --git a/hack/tools/kapinger/clients/http.go b/hack/tools/kapinger/clients/http.go deleted file mode 100644 index fd2f2f74f8..0000000000 --- a/hack/tools/kapinger/clients/http.go +++ /dev/null @@ -1,161 +0,0 @@ -package clients - -import ( - "context" - "fmt" - "io" - "log" - "net/http" - "os" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -type TargetType string - -const ( - Service TargetType = "service" - Pod TargetType = "pod" - - envTargetType = "TARGET_TYPE" -) - -type KapingerHTTPClient struct { - client http.Client - clientset *kubernetes.Clientset - labelselector string - ips []string - port int - targettype TargetType -} - -func NewKapingerHTTPClient(clientset *kubernetes.Clientset, labelselector string, httpPort int) (*KapingerHTTPClient, error) { - k := KapingerHTTPClient{ - client: http.Client{ - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - Timeout: 3 * time.Second, - }, - labelselector: labelselector, - clientset: clientset, - port: httpPort, - } - - targettype := os.Getenv(envTargetType) - if targettype != "" { - k.targettype = TargetType(targettype) - } else { - k.targettype = Service - } - - err := k.getIPS() - if err != nil { - return nil, fmt.Errorf("error getting IPs: %w", err) - } - - return &k, nil -} - -func (k *KapingerHTTPClient) MakeRequest() error { - for _, ip := range k.ips { - url := fmt.Sprintf("http://%s:%d", ip, k.port) - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return err - } - - // Set the "Connection" header to "close" - req.Header.Set("Connection", "close") - - // Send the request - resp, err := k.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - log.Fatalf("Error reading response body from %s: %v", url, err) - return err - } - log.Printf("Response from %s: %s\n", url, string(body)) - } - return nil -} - -func (k *KapingerHTTPClient) getIPS() error { - ips := []string{} - - switch k.targettype { - case Service: - services, err := k.clientset.CoreV1().Services(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{ - LabelSelector: k.labelselector, - }) - if err != nil { - return fmt.Errorf("error getting services: %w", err) - } - - // Extract the Service cluster IP addresses - - for _, svc := range services.Items { - ips = append(ips, svc.Spec.ClusterIP) - } - log.Println("using service IPs:", ips) - - case Pod: - err := waitForPodsRunning(k.clientset, k.labelselector) - if err != nil { - return fmt.Errorf("error waiting for pods to be in Running state: %w", err) - } - - // Get all pods in the cluster with label app=agnhost - pods, err := k.clientset.CoreV1().Pods(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{ - LabelSelector: k.labelselector, - }) - if err != nil { - return fmt.Errorf("error getting pods: %w", err) - } - - for _, pod := range pods.Items { - ips = append(ips, pod.Status.PodIP) - } - - log.Printf("using pod IPs: %v", ips) - default: - return fmt.Errorf("env TARGET_TYPE must be \"service\" or \"pod\"") - } - - k.ips = ips - return nil -} - -// waitForPodsRunning waits for all pods with the specified label to be in the Running phase -func waitForPodsRunning(clientset *kubernetes.Clientset, labelSelector string) error { - return wait.ExponentialBackoff(wait.Backoff{ - Duration: 5 * time.Second, - Factor: 1.5, - }, func() (bool, error) { - pods, err 
-		pods, err := clientset.CoreV1().Pods(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{
-			LabelSelector: labelSelector,
-		})
-		if err != nil {
-			log.Printf("error getting pods: %v", err)
-			return false, nil
-		}
-
-		for _, pod := range pods.Items {
-			if pod.Status.Phase != corev1.PodRunning {
-				log.Printf("waiting for pod %s to be in Running state (currently %s)", pod.Name, pod.Status.Phase)
-				return false, nil
-			}
-		}
-
-		return true, nil
-	})
-}
diff --git a/hack/tools/kapinger/main.go b/hack/tools/kapinger/main.go
deleted file mode 100644
index 2f1a9d6da1..0000000000
--- a/hack/tools/kapinger/main.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package main
-
-import (
-	"log"
-	"math/rand"
-	"os"
-	"strconv"
-	"time"
-
-	"github.com/microsoft/retina/hack/tools/kapinger/clients"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-)
-
-const (
-	delay = 500 * time.Millisecond
-)
-
-func main() {
-	log.Printf("starting kapinger...")
-	clientset, err := getKubernetesClientSet()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	httpPort, err := strconv.Atoi(os.Getenv(envHTTPPort))
-	if err != nil {
-		httpPort = httpport
-		log.Printf("HTTP_PORT not set, defaulting to port %d\n", httpport)
-	}
-
-	go StartServers()
-
-	// Create an HTTP client with the custom Transport
-	client, err := clients.NewKapingerHTTPClient(clientset, "app=kapinger", httpPort)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Initialize the random number generator with a seed based on the current time
-	rand.New(rand.NewSource(time.Now().UnixNano()))
-
-	// Generate a random number between 1 and 1000 for delay jitter
-	jitter := rand.Intn(100) + 1
-	time.Sleep(time.Duration(jitter) * time.Millisecond)
-
-	for {
-		err := client.MakeRequest()
-		if err != nil {
-			log.Printf("error making request: %v", err)
-		}
-		time.Sleep(delay)
-	}
-}
-
-func getKubernetesClientSet() (*kubernetes.Clientset, error) {
-	// Use the in-cluster configuration
-	config, err := rest.InClusterConfig()
-	if err != nil {
-		log.Printf("error getting in-cluster config: %v", err)
-	}
-
-	// Create a Kubernetes clientset using the in-cluster configuration
-	clientset, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		log.Printf("error creating clientset: %v", err)
-	}
-	return clientset, err
-}
diff --git a/hack/tools/kapinger/manifests/deploy.yaml b/hack/tools/kapinger/manifests/deploy.yaml
deleted file mode 100644
index 157cf7ae92..0000000000
--- a/hack/tools/kapinger/manifests/deploy.yaml
+++ /dev/null
@@ -1,232 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kapinger-sa
-  namespace: default
---
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: kapinger-role
-  namespace: default
-rules:
-  - apiGroups: [""]
-    resources: ["services", "pods"]
-    verbs: ["get", "list"]
--
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kapinger-rolebinding
-  namespace: default
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: kapinger-role
-subjects:
-  - kind: ServiceAccount
-    name: kapinger-sa
-    namespace: default
--
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kapinger
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: kapinger
-  template:
-    metadata:
-      labels:
-        app: kapinger
-    spec:
-      serviceAccountName: kapinger-sa
-      nodeSelector:
-        "kubernetes.io/os": linux
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 50
-              podAffinityTerm:
-                labelSelector:
-                  matchExpressions:
-                    - key: "app"
-                      operator: In
-                      values:
-                        - kapinger
-                topologyKey: "kubernetes.io/hostname"
-      containers:
-        - name: kapinger
-          image: ghcr.io/microsoft/retina/kapinger:latest
-          resources:
-            limits:
-              memory: 20Mi
-            requests:
-              memory: 20Mi
-          env:
-            - name: TARGET_TYPE
-              value: "service"
-            - name: POD_IP
-              valueFrom:
-                fieldRef:
-                  fieldPath: status.podIP
-            - name: POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: HTTP_PORT
-              value: "8080"
-            - name: TCP_PORT
-              value: "8085"
-            - name: UDP_PORT
-              value: "8086"
-          ports:
-            - containerPort: 8080
-
--
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kapinger-win
-  namespace: default
-spec:
-  replicas: 5
-  selector:
-    matchLabels:
-      app: kapinger
-  template:
-    metadata:
-      labels:
-        app: kapinger
-    spec:
-      serviceAccountName: kapinger-sa
-      nodeSelector:
-        "kubernetes.io/os": windows
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              podAffinityTerm:
-                labelSelector:
-                  matchExpressions:
-                    - key: "app"
-                      operator: In
-                      values:
-                        - kapinger
-                topologyKey: "kubernetes.io/hostname"
-      containers:
-        - name: kapinger
-          image: ghcr.io/microsoft/retina/kapinger:windows-ltsc2022-amd64-v35
-          env:
-            - name: TARGET_TYPE
-              value: "service"
-            - name: POD_IP
-              valueFrom:
-                fieldRef:
-                  fieldPath: status.podIP
-            - name: POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: HTTP_PORT
-              value: "8080"
-            - name: TCP_PORT
-              value: "8085"
-            - name: UDP_PORT
-              value: "8086"
-          ports:
-            - containerPort: 8080
-
--
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kapinger-drop
-  namespace: default
-spec:
-  replicas: 0
-  selector:
-    matchLabels:
-      app: kapinger-drop
-  template:
-    metadata:
-      labels:
-        app: kapinger-drop
-    spec:
-      serviceAccountName: kapinger-sa
-      nodeSelector:
-        "kubernetes.io/os": linux
-      affinity:
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              podAffinityTerm:
-                labelSelector:
-                  matchExpressions:
-                    - key: "app"
-                      operator: In
-                      values:
-                        - kapinger
-                topologyKey: "kubernetes.io/hostname"
-      containers:
-        - name: kapinger
-          image: ghcr.io/microsoft/retina/kapinger:latest
-          resources:
-            limits:
-              memory: 20Mi
-            requests:
-              memory: 20Mi
-          env:
-            - name: TARGET_TYPE
-              value: "service"
-            - name: POD_IP
-              valueFrom:
-                fieldRef:
-                  fieldPath: status.podIP
-            - name: POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: HTTP_PORT
-              value: "8080"
-            - name: TCP_PORT
-              value: "8085"
-            - name: UDP_PORT
-              value: "8086"
-          ports:
-            - containerPort: 8080
-
--
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: deny-traffic-to-kapinger-drop
-  namespace: default
-spec:
-  podSelector:
-    matchLabels:
-      app: kapinger-drop
-  policyTypes:
-    - Ingress
-    - Egress
-  ingress: []
-  egress: []
--
-apiVersion: v1
-kind: Service
-metadata:
-  name: kapinger-service
-  namespace: default
-  labels:
-    app: kapinger
-spec:
-  selector:
-    app: kapinger
-  ports:
-    - protocol: TCP
-      port: 8080
-      targetPort: 8080
-  type: ClusterIP
diff --git a/hack/tools/kapinger/server.go b/hack/tools/kapinger/server.go
deleted file mode 100644
index 99bc586b7d..0000000000
--- a/hack/tools/kapinger/server.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package main
-
-import (
-	"context"
-	"log"
-	"os"
-	"strconv"
-
-	"github.com/microsoft/retina/hack/tools/kapinger/servers"
-)
-
-const (
-	httpport = 8080
-	tcpport  = 8085
-	udpport  = 8086
-
-	envHTTPPort = "HTTP_PORT"
-	envTCPPort  = "TCP_PORT"
-	envUDPPort  = "UDP_PORT"
"UDP_PORT" -) - -type Server interface { - Start(ctx context.Context) error -} - -type Kapinger struct { - servers []Server -} - -func (k *Kapinger) Start(ctx context.Context) { - for i := range k.servers { - go func(i int) { - err := k.servers[i].Start(ctx) - if err != nil { - log.Printf("Error starting server: %s\n", err) - } - }(i) - } - <-ctx.Done() -} - -func StartServers() { - tcpPort, err := strconv.Atoi(os.Getenv(envTCPPort)) - if err != nil { - tcpPort = tcpport - log.Printf("TCP_PORT not set, defaulting to port %d\n", tcpport) - } - - udpPort, err := strconv.Atoi(os.Getenv(envUDPPort)) - if err != nil { - udpPort = udpport - log.Printf("UDP_PORT not set, defaulting to port %d\n", udpport) - } - - httpPort, err := strconv.Atoi(os.Getenv(envHTTPPort)) - if err != nil { - httpPort = httpport - log.Printf("HTTP_PORT not set, defaulting to port %d\n", httpport) - } - - k := &Kapinger{ - servers: []Server{ - servers.NewKapingerTCPServer(tcpPort), - servers.NewKapingerUDPServer(udpPort), - servers.NewKapingerHTTPServer(httpPort), - }, - } - - // cancel - ctx := context.Background() - k.Start(ctx) -} diff --git a/hack/tools/kapinger/servers/http.go b/hack/tools/kapinger/servers/http.go deleted file mode 100644 index 5810c5e4d5..0000000000 --- a/hack/tools/kapinger/servers/http.go +++ /dev/null @@ -1,52 +0,0 @@ -package servers - -import ( - "context" - "fmt" - "log" - "net/http" - "strconv" -) - -type KapingerHTTPServer struct { - port int -} - -func NewKapingerHTTPServer(port int) *KapingerHTTPServer { - return &KapingerHTTPServer{ - port: port, - } -} - -func (k *KapingerHTTPServer) Start(ctx context.Context) error { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, err := w.Write(getResponse(r.RemoteAddr, "http")) - if err != nil { - fmt.Println(err) - } - }) - - addr := ":" + strconv.Itoa(k.port) - - log.Printf("[HTTP] Listening on %+v\n", addr) - - server := &http.Server{ - Addr: addr, - Handler: http.HandlerFunc(handler), - } - - go func() { - err := server.ListenAndServe() - if err != nil { - panic(err) - } - }() - - <-ctx.Done() - err := server.Shutdown(ctx) - if err != nil { - return err - } - - return nil -} diff --git a/hack/tools/kapinger/servers/tcp.go b/hack/tools/kapinger/servers/tcp.go deleted file mode 100644 index 7f9f200aa9..0000000000 --- a/hack/tools/kapinger/servers/tcp.go +++ /dev/null @@ -1,61 +0,0 @@ -package servers - -import ( - "context" - "fmt" - "log" - "net" -) - -const ( - tcp = "tcp" -) - -type KapingerTCPServer struct { - port int -} - -func NewKapingerTCPServer(port int) *KapingerTCPServer { - return &KapingerTCPServer{ - port: port, - } -} - -func (k *KapingerTCPServer) Start(ctx context.Context) error { - listener, err := net.ListenTCP(tcp, &net.TCPAddr{Port: k.port}) - if err != nil { - fmt.Println(err) - return nil - } - defer listener.Close() - - log.Printf("[TCP] Listening on %+v\n", listener.Addr().String()) - - for { - select { - case <-ctx.Done(): - fmt.Println("Exiting TCP server") - return nil - default: - connection, err := listener.Accept() - if err != nil { - fmt.Println(err) - return err - } - handleConnection(connection) - } - } -} - -func handleConnection(connection net.Conn) { - addressString := fmt.Sprintf("%+v", connection.RemoteAddr()) - _, err := connection.Write(getResponse(addressString, tcp)) - if err != nil { - fmt.Println(err) - } - - err = connection.Close() - if err != nil { - fmt.Println(err) - } -} diff --git a/hack/tools/kapinger/servers/udp.go b/hack/tools/kapinger/servers/udp.go deleted 
deleted file mode 100644
index 19152beb10..0000000000
--- a/hack/tools/kapinger/servers/udp.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package servers
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"net"
-	"strings"
-)
-
-const (
-	udp = "udp"
-)
-
-type KapingerUDPServer struct {
-	buffersize int
-	port       int
-}
-
-func NewKapingerUDPServer(port int) *KapingerUDPServer {
-	return &KapingerUDPServer{
-		buffersize: 1024,
-		port:       port,
-	}
-}
-
-func (k *KapingerUDPServer) Start(ctx context.Context) error {
-	connection, err := net.ListenUDP(udp, &net.UDPAddr{Port: k.port})
-	if err != nil {
-		fmt.Println(err)
-		return err
-	}
-	log.Printf("[UDP] Listening on %+v\n", connection.LocalAddr().String())
-
-	defer connection.Close()
-	buffer := make([]byte, k.buffersize)
-
-	for {
-		select {
-		case <-ctx.Done():
-			fmt.Println("Exiting UDP server")
-			return nil
-		default:
-			n, addr, err := connection.ReadFromUDP(buffer)
-			if err != nil {
-				fmt.Println(err)
-			}
-			payload := strings.TrimSpace(string(buffer[0 : n-1]))
-
-			if payload == "STOP" {
-				fmt.Println("Exiting UDP server")
-				return nil
-			}
-
-			addressString := fmt.Sprintf("%+v", addr)
-			_, err = connection.WriteToUDP(getResponse(addressString, udp), addr)
-			if err != nil {
-				return fmt.Errorf("error writing to UDP connection: %w", err)
-			}
-		}
-	}
-}
diff --git a/hack/tools/kapinger/servers/util.go b/hack/tools/kapinger/servers/util.go
deleted file mode 100644
index 497c90071e..0000000000
--- a/hack/tools/kapinger/servers/util.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package servers
-
-import (
-	"fmt"
-	"os"
-)
-
-func getResponse(addressString, protocol string) []byte {
-	podname := os.Getenv("POD_NAME")
-	return []byte(fmt.Sprintf("connected to: %s via %s, connected from: %v", podname, protocol, addressString))
-}
diff --git a/pkg/plugin/linuxutil/Makefile b/pkg/plugin/linuxutil/Makefile
index 7e90ff976f..ae7d95223b 100644
--- a/pkg/plugin/linuxutil/Makefile
+++ b/pkg/plugin/linuxutil/Makefile
@@ -5,7 +5,7 @@ MOCKGEN = $(TOOLS_BIN_DIR)/mockgen
 
 .PHONY: generate
 generate: $(MOCKGEN) ## Generate mock clients
-	$(MOCKGEN) -source=$(REPO_ROOT)/pkg/plugin/linuxutil/types.go -copyright_file=$(REPO_ROOT)/pkg/lib/ignore_headers.txt -package=linuxutil > linuxutil_mock_generated.go
+	$(MOCKGEN) -source=$(REPO_ROOT)/pkg/plugin/linuxutil/types_linux.go -copyright_file=$(REPO_ROOT)/pkg/lib/ignore_headers.txt -package=linuxutil > linuxutil_mock_generated.go
 
 $(MOCKGEN):
 	@make -C $(REPO_ROOT) $(MOCKGEN)
diff --git a/pkg/plugin/linuxutil/linuxutil_mock_generated.go b/pkg/plugin/linuxutil/linuxutil_mock_generated.go
index 1c9f026dac..a08f7c1254 100644
--- a/pkg/plugin/linuxutil/linuxutil_mock_generated.go
+++ b/pkg/plugin/linuxutil/linuxutil_mock_generated.go
@@ -5,7 +5,7 @@
 //
 // Code generated by MockGen. DO NOT EDIT.
-// Source: /home/enterprise/retina/pkg/plugin/linuxutil/types.go
+// Source: /home/anubhab/github/forks/retina/pkg/plugin/linuxutil/types_linux.go
 
 // Package linuxutil is a generated GoMock package.
 package linuxutil
diff --git a/pkg/plugin/packetparser/_cprog/dynamic.h b/pkg/plugin/packetparser/_cprog/dynamic.h
new file mode 100644
index 0000000000..80abbd931f
--- /dev/null
+++ b/pkg/plugin/packetparser/_cprog/dynamic.h
@@ -0,0 +1,2 @@
+// Placeholder header file that will be replaced by the actual header file at runtime
+// DO NOT DELETE
diff --git a/site/sidebars.js b/site/sidebars.js
index a234b29fa9..1c56d28cd3 100644
--- a/site/sidebars.js
+++ b/site/sidebars.js
@@ -26,7 +26,6 @@ const sidebars = {
       items: [
        'installation/setup',
        'installation/prometheus-unmanaged',
-       'installation/prometheus-azure-managed',
        'installation/grafana',
        'installation/cli',
        'installation/config',
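
A note on the `IMAGE_NAMESPACE` change at the top of this diff: container registries such as ghcr.io require lowercase repository names, which is why the parsed `owner/repo` is now piped through `tr '[:upper:]' '[:lower:]'`. Below is a minimal sketch of the same derivation run outside make, assuming a GitHub `origin` remote; the fork URLs in the comments are hypothetical examples, while the `sed`/`tr` pipeline is taken verbatim from the Makefile change.

```bash
# Derive a lowercase owner/repo image namespace from the origin remote URL.
# Handles both SSH and HTTPS GitHub remotes, e.g. (hypothetical fork):
#   git@github.com:SomeUser/retina.git     -> someuser/retina
#   https://github.com/SomeUser/retina.git -> someuser/retina
git config --get remote.origin.url \
  | sed -E 's/.*github\.com[\/:]([^\/]+)\/([^\/.]+).git/\1\/\2/' \
  | tr '[:upper:]' '[:lower:]'
```

Without the `tr` step, a fork owned by a mixed-case GitHub user would produce an image name that registries reject at push time.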