diff --git a/notebooks/Experimental/Network.ipynb b/notebooks/Experimental/Network.ipynb new file mode 100644 index 00000000000..7a1f3f257dc --- /dev/null +++ b/notebooks/Experimental/Network.ipynb @@ -0,0 +1,209 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "bd9a2226-3e53-4f27-9213-75a8c3ff9176", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fddf8d07-d154-4284-a27b-d74e35d3f851", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client = sy.login(\n", + " url=\"http://localhost\", port=9081, email=\"info@openmined.org\", password=\"changethis\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f7b106d-b784-45d8-b54d-4ce2de2da453", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login(\n", + " url=\"http://localhost\", port=9082, email=\"info@openmined.org\", password=\"changethis\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff504949-620d-4e26-beee-0d39e0e502eb", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.connect_to_gateway(gateway_client, reverse_tunnel=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba7bc71a-4e6a-4429-9588-7b3d0ed19e27", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client.api.services.request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b4984e1-331e-4fd8-b012-768fc613f48a", + "metadata": {}, + "outputs": [], + "source": [ + "# gateway_client.api.services.request[0].approve()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90dc44bd", + "metadata": {}, + "outputs": [], + "source": [ + "node_peers = gateway_client.api.network.get_all_peers()\n", + "node_peers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8c06aaa6-4157-42d1-959f-9d47722a3420", + 
"metadata": {}, + "outputs": [], + "source": [ + "node_peer = gateway_client.api.network.get_all_peers()[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb63a77b", + "metadata": {}, + "outputs": [], + "source": [ + "node_peer.node_routes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61882e86", + "metadata": {}, + "outputs": [], + "source": [ + "node_peer.node_routes[0].__dict__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb19dbc6-869b-46dc-92e3-5e75ee6d0b06", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.api.network.get_all_peers()[0].node_routes[0].__dict__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32d09a51", + "metadata": {}, + "outputs": [], + "source": [ + "# node_peer.client_with_key(sy.SyftSigningKey.generate())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b7d9e41d", + "metadata": {}, + "outputs": [], + "source": [ + "# gateway_client.api.network.delete_route(node_peer.verify_key, node_peer.node_routes[1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8fa24ec7", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a081250-abc3-43a3-9e06-ff0c3a362ebf", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client.peers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6fedfe4-9362-47c9-9342-5cf6eacde8ab", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client_proxy = gateway_client.peers[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f1940e00-0337-4b56-88c2-d70f397a7016", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client_proxy.connection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "613125c5-6321-4238-852c-ff0cfcd9526a", + "metadata": {}, + "outputs": [], + "source": [] + } + 
], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.1.-1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/grid/backend/grid/core/config.py b/packages/grid/backend/grid/core/config.py index 8c55b8cd3f7..b6f5ddf9067 100644 --- a/packages/grid/backend/grid/core/config.py +++ b/packages/grid/backend/grid/core/config.py @@ -155,6 +155,9 @@ def get_emails_enabled(self) -> Self: ASSOCIATION_REQUEST_AUTO_APPROVAL: bool = str_to_bool( os.getenv("ASSOCIATION_REQUEST_AUTO_APPROVAL", "False") ) + REVERSE_TUNNEL_ENABLED: bool = str_to_bool( + os.getenv("REVERSE_TUNNEL_ENABLED", "false") + ) model_config = SettingsConfigDict(case_sensitive=True) diff --git a/packages/grid/default.env b/packages/grid/default.env index 6ae9748bfef..906251ee865 100644 --- a/packages/grid/default.env +++ b/packages/grid/default.env @@ -21,6 +21,7 @@ TRAEFIK_PUBLIC_TAG=traefik-public STACK_NAME=grid-openmined-org DOCKER_IMAGE_BACKEND=openmined/grid-backend DOCKER_IMAGE_FRONTEND=openmined/grid-frontend +DOCKER_IMAGE_RATHOLE=openmined/grid-rathole DOCKER_IMAGE_TRAEFIK=traefik TRAEFIK_VERSION=v2.11.0 REDIS_VERSION=6.2 @@ -109,3 +110,6 @@ ENABLE_SIGNUP=False # Enclave Attestation DOCKER_IMAGE_ENCLAVE_ATTESTATION=openmined/grid-enclave-attestation + +# Rathole Config +RATHOLE_PORT=2333 \ No newline at end of file diff --git a/packages/grid/devspace.yaml b/packages/grid/devspace.yaml index 6e12cf245cc..8bbf3487daf 100644 --- a/packages/grid/devspace.yaml +++ b/packages/grid/devspace.yaml @@ -25,6 +25,7 @@ vars: DOCKER_IMAGE_BACKEND: openmined/grid-backend DOCKER_IMAGE_FRONTEND: openmined/grid-frontend DOCKER_IMAGE_SEAWEEDFS: openmined/grid-seaweedfs + 
DOCKER_IMAGE_RATHOLE: openmined/grid-rathole DOCKER_IMAGE_ENCLAVE_ATTESTATION: openmined/grid-enclave-attestation CONTAINER_REGISTRY: "docker.io" VERSION: "0.8.7-beta.13" @@ -67,15 +68,16 @@ deployments: releaseName: syft-dev chart: name: ./helm/syft + # values that need to be templated go here values: global: registry: ${CONTAINER_REGISTRY} version: dev-${DEVSPACE_TIMESTAMP} - node: - type: domain # required for the gateway profile - # anything that does not need devspace $env vars should go in values.dev.yaml + node: {} + # anything that does not need templating should go in helm/examples/dev/base.yaml + # or profile specific values files valuesFiles: - - ./helm/values.dev.yaml + - ./helm/examples/dev/base.yaml dev: mongo: @@ -115,46 +117,84 @@ dev: localPort: 3480 profiles: - - name: dev-low + - name: domain-low + description: "Deploy a low-side domain" patches: - op: add path: deployments.syft.helm.values.node value: side: low - - name: dev-high + + - name: domain-tunnel + description: "Deploy a domain with tunneling enabled" patches: + # enable rathole image - op: add - path: deployments.syft.helm.values.node + path: images value: - side: high + rathole: + image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_RATHOLE}" + buildKit: + args: ["--platform", "linux/${PLATFORM}"] + dockerfile: ./rathole/rathole.dockerfile + context: ./rathole + tags: + - dev-${DEVSPACE_TIMESTAMP} + # use rathole client-specific chart values + - op: add + path: deployments.syft.helm.valuesFiles + value: ./helm/examples/dev/domain.tunnel.yaml - name: gateway + description: "Deploy a Gateway Node with tunnel enabled" patches: - - op: replace - path: deployments.syft.helm.values.node.type - value: "gateway" + # enable rathole image + - op: add + path: images + value: + rathole: + image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_RATHOLE}" + buildKit: + args: ["--platform", "linux/${PLATFORM}"] + dockerfile: ./rathole/rathole.dockerfile + context: ./rathole + tags: + - dev-${DEVSPACE_TIMESTAMP} + # 
enable rathole `devspace dev` config + - op: add + path: dev + value: + rathole: + labelSelector: + app.kubernetes.io/name: syft + app.kubernetes.io/component: rathole + ports: + - port: "2333" + # use gateway-specific chart values + - op: add + path: deployments.syft.helm.valuesFiles + value: ./helm/examples/dev/gateway.yaml + # remove unused images - op: remove path: images.seaweedfs - op: remove path: dev.seaweedfs - # Port Re-Mapping - # Mongo - op: replace path: dev.mongo.ports[0].port value: 27018:27017 - - # Backend - op: replace path: dev.backend.ports[0].port value: 5679:5678 - - # Backend Container SSH - op: replace path: dev.backend.containers.backend-container.ssh.localPort value: 3481 + - op: replace + path: dev.rathole.ports[0].port + value: 2334:2333 - name: gcp + description: "Deploy a high-side domain on GCP" patches: - op: replace path: deployments.syft.helm.valuesFiles @@ -162,6 +202,7 @@ profiles: - ./helm/examples/gcp/gcp.high.yaml - name: gcp-low + description: "Deploy a low-side domain on GCP" patches: - op: replace path: deployments.syft.helm.valuesFiles @@ -169,6 +210,7 @@ profiles: - ./helm/examples/gcp/gcp.low.yaml - name: azure + description: "Deploy a high-side domain on AKS" patches: - op: replace path: deployments.syft.helm.valuesFiles @@ -176,11 +218,9 @@ profiles: - ./helm/examples/azure/azure.high.yaml - name: enclave + description: "Deploy an enclave node" patches: - - op: replace - path: deployments.syft.helm.values.node.type - value: "enclave" - + # enable image build for enclave-attestation - op: add path: images value: @@ -198,29 +238,20 @@ profiles: enclave-attestation: sync: - path: ./enclave/attestation/server:/app/server - + # use enclave-specific chart values - op: add - path: deployments.syft.helm.values - value: - attestation: - enabled: true - + path: deployments.syft.helm.valuesFiles + value: ./helm/examples/dev/enclave.yaml # Port Re-Mapping - # Mongo - op: replace path: dev.mongo.ports[0].port value: 27019:27017 - - 
# Backend - op: replace path: dev.backend.ports[0].port value: 5680:5678 - - # Backend Container SSH - op: replace path: dev.backend.containers.backend-container.ssh.localPort value: 3482 - - op: replace path: dev.seaweedfs.ports value: diff --git a/packages/grid/helm/values.dev.yaml b/packages/grid/helm/examples/dev/base.yaml similarity index 71% rename from packages/grid/helm/values.dev.yaml rename to packages/grid/helm/examples/dev/base.yaml index c24aa51d294..b81e4847cd8 100644 --- a/packages/grid/helm/values.dev.yaml +++ b/packages/grid/helm/examples/dev/base.yaml @@ -1,15 +1,9 @@ -# Helm chart values used for development and testing -# Can be used through `helm install -f values.dev.yaml` or devspace `valuesFiles` +# Base Helm chart values used for development and testing +# Can be used through `helm install -f packages/grid/helm/examples/dev/base.yaml` or devspace `valuesFiles` global: randomizedSecrets: false -registry: - resourcesPreset: null - resources: null - - storageSize: "5Gi" - node: rootEmail: info@openmined.org associationRequestAutoApproval: true @@ -44,10 +38,21 @@ frontend: resources: null proxy: + enabled: true + resourcesPreset: null resources: null -# attestation: -# enabled: true -# resourcesPreset: null -# resources: null +registry: + enabled: true + + resourcesPreset: null + resources: null + + storageSize: "5Gi" + +rtunnel: + enabled: false + +attestation: + enabled: false diff --git a/packages/grid/helm/examples/dev/domain.tunnel.yaml b/packages/grid/helm/examples/dev/domain.tunnel.yaml new file mode 100644 index 00000000000..cec2e97cc6e --- /dev/null +++ b/packages/grid/helm/examples/dev/domain.tunnel.yaml @@ -0,0 +1,11 @@ +# Values for deploying a domain with a reverse tunnel server in client-mode +# Patched on top of patch `base.yaml` + +# Proxy is required for the tunnel to work +proxy: + enabled: true + +rtunnel: + enabled: true + mode: client + logLevel: debug diff --git a/packages/grid/helm/examples/dev/enclave.yaml 
b/packages/grid/helm/examples/dev/enclave.yaml new file mode 100644 index 00000000000..2951da06b05 --- /dev/null +++ b/packages/grid/helm/examples/dev/enclave.yaml @@ -0,0 +1,8 @@ +# Values for deploying an enclave +# Patched on top of patch `base.yaml` + +node: + type: enclave + +attestation: + enabled: true diff --git a/packages/grid/helm/examples/dev/gateway.yaml b/packages/grid/helm/examples/dev/gateway.yaml new file mode 100644 index 00000000000..e0916c98c21 --- /dev/null +++ b/packages/grid/helm/examples/dev/gateway.yaml @@ -0,0 +1,14 @@ +# Values for deploying a gateway with a reverse tunnel server +# Patched on top of patch `base.yaml` + +node: + type: gateway + +# Proxy is required for the tunnel to work +proxy: + enabled: true + +rtunnel: + enabled: true + mode: server + logLevel: debug diff --git a/packages/grid/helm/syft/templates/backend/backend-service-account.yaml b/packages/grid/helm/syft/templates/backend/backend-service-account.yaml index a466d0c3fe4..76d70afee70 100644 --- a/packages/grid/helm/syft/templates/backend/backend-service-account.yaml +++ b/packages/grid/helm/syft/templates/backend/backend-service-account.yaml @@ -26,7 +26,7 @@ metadata: app.kubernetes.io/component: backend rules: - apiGroups: [""] - resources: ["pods", "configmaps", "secrets"] + resources: ["pods", "configmaps", "secrets", "services"] verbs: ["create", "get", "list", "watch", "update", "patch", "delete"] - apiGroups: [""] resources: ["pods/log"] diff --git a/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml b/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml index be0a35d6245..b86916d6bde 100644 --- a/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml +++ b/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml @@ -86,9 +86,17 @@ spec: {{- if .Values.node.debuggerEnabled }} - name: DEBUGGER_ENABLED value: "true" + {{- end }} + {{- if eq .Values.node.type "gateway" }} - name: ASSOCIATION_REQUEST_AUTO_APPROVAL 
value: {{ .Values.node.associationRequestAutoApproval | quote }} {{- end }} + {{- if .Values.rtunnel.enabled }} + - name: RATHOLE_PORT + value: {{ .Values.rtunnel.port | quote }} + - name: REVERSE_TUNNEL_ENABLED + value: "true" + {{- end }} # MongoDB - name: MONGO_PORT value: {{ .Values.mongo.port | quote }} diff --git a/packages/grid/helm/syft/templates/proxy/proxy-configmap.yaml b/packages/grid/helm/syft/templates/proxy/proxy-configmap.yaml index 2ebc6d6a9ba..1bcdff49876 100644 --- a/packages/grid/helm/syft/templates/proxy/proxy-configmap.yaml +++ b/packages/grid/helm/syft/templates/proxy/proxy-configmap.yaml @@ -22,19 +22,28 @@ data: loadBalancer: servers: - url: "http://seaweedfs:8333" + rathole: + loadBalancer: + servers: + - url: "http://rathole:2333" routers: + rathole: + rule: "PathPrefix(`/`) && Headers(`Upgrade`, `websocket`) && !PathPrefix(`/rtunnel`)" + entryPoints: + - "web" + service: "rathole" frontend: - rule: "PathPrefix(`/`)" + rule: "PathPrefix(`/`) && !PathPrefix(`/rtunnel`)" entryPoints: - "web" service: "frontend" backend: - rule: "PathPrefix(`/api`) || PathPrefix(`/docs`) || PathPrefix(`/redoc`)" + rule: "(PathPrefix(`/api`) || PathPrefix(`/docs`) || PathPrefix(`/redoc`)) && !PathPrefix(`/rtunnel`)" entryPoints: - "web" service: "backend" blob-storage: - rule: "PathPrefix(`/blob`)" + rule: "PathPrefix(`/blob`) && !PathPrefix(`/rtunnel`)" entryPoints: - "web" service: "seaweedfs" @@ -72,5 +81,18 @@ data: providers: file: - filename: /etc/traefik/dynamic.yml -{{- end }} + directory: /etc/traefik/ + watch: true + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: proxy-config-dynamic + labels: + {{- include "common.labels" . 
| nindent 4 }} + app.kubernetes.io/component: proxy +data: + rathole-dynamic.yml: | +{{- end }} \ No newline at end of file diff --git a/packages/grid/helm/syft/templates/proxy/proxy-deployment.yaml b/packages/grid/helm/syft/templates/proxy/proxy-deployment.yaml index 69b60f905ab..120944e6504 100644 --- a/packages/grid/helm/syft/templates/proxy/proxy-deployment.yaml +++ b/packages/grid/helm/syft/templates/proxy/proxy-deployment.yaml @@ -57,7 +57,11 @@ spec: readinessProbe: null terminationGracePeriodSeconds: 5 volumes: - - configMap: - name: proxy-config - name: traefik-conf + - name: traefik-conf + projected: + sources: + - configMap: + name: proxy-config + - configMap: + name: proxy-config-dynamic {{- end }} diff --git a/packages/grid/helm/syft/templates/rathole/rathole-configmap.yaml b/packages/grid/helm/syft/templates/rathole/rathole-configmap.yaml new file mode 100644 index 00000000000..77f2bec4c4b --- /dev/null +++ b/packages/grid/helm/syft/templates/rathole/rathole-configmap.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rtunnel.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: rathole-config + resourceVersion: "" + labels: + {{- include "common.labels" . 
| nindent 4 }} + app.kubernetes.io/component: rathole +data: + {{- if eq .Values.rtunnel.mode "server" }} + server.toml: | + [server] + bind_addr = "0.0.0.0:2333" + + [server.transport] + type = "websocket" + [server.transport.websocket] + tls = false + {{- end }} + + {{- if eq .Values.rtunnel.mode "client" }} + client.toml: | + [client] + remote_addr = "0.0.0.0:2333" + + [client.transport] + type = "websocket" + [client.transport.websocket] + tls = false + {{- end }} +{{- end }} diff --git a/packages/grid/helm/syft/templates/rathole/rathole-service.yaml b/packages/grid/helm/syft/templates/rathole/rathole-service.yaml new file mode 100644 index 00000000000..087da2256e6 --- /dev/null +++ b/packages/grid/helm/syft/templates/rathole/rathole-service.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rtunnel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: rathole + labels: + {{- include "common.labels" . | nindent 4 }} + app.kubernetes.io/component: rathole +spec: + clusterIP: None + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: rathole + ports: + - name: rathole + port: 2333 + targetPort: 2333 + protocol: TCP +{{- end }} \ No newline at end of file diff --git a/packages/grid/helm/syft/templates/rathole/rathole-statefulset.yaml b/packages/grid/helm/syft/templates/rathole/rathole-statefulset.yaml new file mode 100644 index 00000000000..86d39b51551 --- /dev/null +++ b/packages/grid/helm/syft/templates/rathole/rathole-statefulset.yaml @@ -0,0 +1,80 @@ +{{- if .Values.rtunnel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: rathole + labels: + {{- include "common.labels" . | nindent 4 }} + app.kubernetes.io/component: rathole +spec: + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + {{- include "common.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: rathole + serviceName: rathole + podManagementPolicy: OrderedReady + template: + metadata: + labels: + {{- include "common.labels" . | nindent 8 }} + app.kubernetes.io/component: rathole + {{- if .Values.rtunnel.podLabels }} + {{- toYaml .Values.rtunnel.podLabels | nindent 8 }} + {{- end }} + {{- if .Values.rtunnel.podAnnotations }} + annotations: {{- toYaml .Values.rtunnel.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if .Values.rtunnel.nodeSelector }} + nodeSelector: {{- .Values.rtunnel.nodeSelector | toYaml | nindent 8 }} + {{- end }} + containers: + - name: rathole + image: {{ .Values.global.registry }}/openmined/grid-rathole:{{ .Values.global.version }} + imagePullPolicy: Always + resources: {{ include "common.resources.set" (dict "resources" .Values.rtunnel.resources "preset" .Values.rtunnel.resourcesPreset) | nindent 12 }} + env: + - name: LOG_LEVEL + value: {{ .Values.rtunnel.logLevel | quote }} + - name: MODE + value: {{ .Values.rtunnel.mode | quote }} + - name: RATHOLE_PORT + value: {{ .Values.rtunnel.port | quote }} + {{- if .Values.rtunnel.env }} + {{- toYaml .Values.rtunnel.env | nindent 12 }} + {{- end }} + ports: + - name: rathole-port + containerPort: 2333 + startupProbe: null + livenessProbe: null + volumeMounts: + - name: mount-config + mountPath: /conf/ + readOnly: false + - name: rathole-config + mountPath: /app/conf/ + readOnly: false + terminationGracePeriodSeconds: 5 + volumes: + - name: rathole-config + emptyDir: {} + - name: mount-config + configMap: + name: rathole-config + volumeClaimTemplates: + - metadata: + name: rathole-data + labels: + {{- include "common.volumeLabels" . 
| nindent 8 }} + app.kubernetes.io/component: rathole + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi +{{- end }} \ No newline at end of file diff --git a/packages/grid/helm/syft/values.yaml b/packages/grid/helm/syft/values.yaml index 72907e2aa81..377bd763c54 100644 --- a/packages/grid/helm/syft/values.yaml +++ b/packages/grid/helm/syft/values.yaml @@ -134,6 +134,7 @@ proxy: registry: enabled: true + # Extra environment vars env: null @@ -237,6 +238,31 @@ ingress: # ================================================================================= + +rtunnel: + enabled: false + logLevel: info + + port: 2333 + mode: client + + # Extra environment vars + env: null + + # Pod labels & annotations + podLabels: null + podAnnotations: null + + # Node selector for pods + nodeSelector: null + + # Pod Resource Limits + resourcesPreset: small + resources: null + +# ================================================================================= + + # Enclave attestation Service attestation: enabled: false diff --git a/packages/grid/helm/values.dev.high.yaml b/packages/grid/helm/values.dev.high.yaml deleted file mode 100644 index 9a0e266704a..00000000000 --- a/packages/grid/helm/values.dev.high.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Helm chart values used for development and testing -# Can be used through `helm install -f values.dev.yaml` or devspace `valuesFiles` - -global: - randomizedSecrets: false - -registry: - resourcesPreset: null - resources: null - - storageSize: "5Gi" - -node: - rootEmail: info@openmined.org - side: high - - resourcesPreset: 2xlarge - resources: null - - defaultWorkerPool: - count: 1 - podLabels: null - podAnnotations: null - - secret: - defaultRootPassword: changethis - -mongo: - resourcesPreset: null - resources: null - - secret: - rootPassword: example - -seaweedfs: - resourcesPreset: null - resources: null - - secret: - s3RootPassword: admin - -frontend: - resourcesPreset: null - resources: null - -proxy: - 
resourcesPreset: null - resources: null diff --git a/packages/grid/helm/values.dev.low.yaml b/packages/grid/helm/values.dev.low.yaml deleted file mode 100644 index 7e5de1a68f2..00000000000 --- a/packages/grid/helm/values.dev.low.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Helm chart values used for development and testing -# Can be used through `helm install -f values.dev.yaml` or devspace `valuesFiles` - -global: - randomizedSecrets: false - -registry: - resourcesPreset: null - resources: null - - storageSize: "5Gi" - -node: - rootEmail: info@openmined.org - side: low - - resourcesPreset: 2xlarge - resources: null - - defaultWorkerPool: - count: 1 - podLabels: null - podAnnotations: null - - secret: - defaultRootPassword: changethis - -mongo: - resourcesPreset: null - resources: null - - secret: - rootPassword: example - -seaweedfs: - resourcesPreset: null - resources: null - - secret: - s3RootPassword: admin - -frontend: - resourcesPreset: null - resources: null - -proxy: - resourcesPreset: null - resources: null diff --git a/packages/grid/rathole/rathole.dockerfile b/packages/grid/rathole/rathole.dockerfile new file mode 100644 index 00000000000..1dee47a9411 --- /dev/null +++ b/packages/grid/rathole/rathole.dockerfile @@ -0,0 +1,26 @@ +ARG RATHOLE_VERSION="0.5.0" +ARG PYTHON_VERSION="3.12" + +FROM rust as build +ARG RATHOLE_VERSION +ARG FEATURES +RUN apt update && apt install -y git +RUN git clone -b v${RATHOLE_VERSION} https://github.com/rapiz1/rathole + +WORKDIR /rathole +RUN cargo build --locked --release --features ${FEATURES:-default} + +FROM python:${PYTHON_VERSION}-slim-bookworm +ARG RATHOLE_VERSION +ENV MODE="client" +ENV LOG_LEVEL="info" +RUN apt update && apt install -y netcat-openbsd vim rsync +COPY --from=build /rathole/target/release/rathole /app/rathole + +WORKDIR /app +COPY ./start.sh /app/start.sh + +EXPOSE 2333/udp +EXPOSE 2333 + +CMD ["sh", "-c", "/app/start.sh"] diff --git a/packages/grid/rathole/start.sh b/packages/grid/rathole/start.sh new file 
mode 100755 index 00000000000..87111ac8c9f --- /dev/null +++ b/packages/grid/rathole/start.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +MODE=${MODE:-server} +RUST_LOG=${LOG_LEVEL:-trace} + +# Copy configuration files +copy_config() { + cp -L -r -f /conf/* conf/ +} + +# Start the server and reload until healthy +start_server() { + while true; do + RUST_LOG=$RUST_LOG /app/rathole conf/server.toml + status=$? + if [ $status -eq 0 ]; then + break + else + echo "Server failed to start, retrying in 5 seconds..." + sleep 5 + fi + done & +} + +# Start the client +start_client() { + while true; do + RUST_LOG=$RUST_LOG /app/rathole conf/client.toml + status=$? + if [ $status -eq 0 ]; then + break + else + echo "Failed to load client.toml, retrying in 5 seconds..." + sleep 10 + fi + done & +} + +# Reload configuration every 10 seconds +reload_config() { + echo "Starting configuration reload loop..." + while true; do + copy_config + sleep 10 + done +} + +# Make an initial copy of the configuration +copy_config + +if [[ $MODE == "server" ]]; then + start_server +elif [[ $MODE == "client" ]]; then + start_client +else + echo "RATHOLE MODE is set to an invalid value. Exiting." 
+ exit 1 +fi + +# Start the configuration reload in the background to keep the configuration up to date +reload_config diff --git a/packages/grid/traefik/docker/dynamic.yml b/packages/grid/traefik/docker/dynamic.yml index cc6a7bb7ee4..61e68e7ad03 100644 --- a/packages/grid/traefik/docker/dynamic.yml +++ b/packages/grid/traefik/docker/dynamic.yml @@ -20,10 +20,6 @@ http: loadBalancer: servers: - url: "http://seaweedfs:4001" - headscale: - loadBalancer: - servers: - - url: "http://headscale:8080" routers: frontend: rule: "PathPrefix(`/`)" @@ -48,14 +44,6 @@ http: middlewares: - "blob-storage-url" - "blob-storage-host" - vpn: - rule: "PathPrefix(`/vpn`)" - entryPoints: - - web - - vpn - service: "headscale" - middlewares: - - "vpn-url" ping: rule: "PathPrefix(`/ping`)" entryPoints: @@ -73,7 +61,3 @@ http: stripprefix: prefixes: /blob forceslash: true - vpn-url: - stripprefix: - prefixes: /vpn - forceslash: true diff --git a/packages/syft/setup.cfg b/packages/syft/setup.cfg index 6c8ed7d741b..f241a3a6bef 100644 --- a/packages/syft/setup.cfg +++ b/packages/syft/setup.cfg @@ -61,6 +61,8 @@ syft = PyYAML==6.0.1 azure-storage-blob==12.19.1 ipywidgets==8.1.2 + tomli==2.0.1 # Later for python 3.11 > we can just use tomlib that comes with python + tomli_w==1.0.0 rich==13.7.1 jinja2==3.1.4 tenacity==8.3.0 diff --git a/packages/syft/src/syft/client/client.py b/packages/syft/src/syft/client/client.py index eb5b1d1cc44..f153fe88f41 100644 --- a/packages/syft/src/syft/client/client.py +++ b/packages/syft/src/syft/client/client.py @@ -4,6 +4,8 @@ # stdlib import base64 from collections.abc import Callable +from collections.abc import Generator +from collections.abc import Iterable from enum import Enum from getpass import getpass import json @@ -49,7 +51,6 @@ from ..service.user.user_roles import ServiceRole from ..service.user.user_service import UserService from ..types.grid_url import GridURL -from ..types.syft_object import SYFT_OBJECT_VERSION_2 from ..types.syft_object import 
SYFT_OBJECT_VERSION_3 from ..types.uid import UID from ..util.telemetry import instrument @@ -72,11 +73,6 @@ from ..service.network.node_peer import NodePeer -# use to enable mitm proxy -# from syft.grid.connections.http_connection import HTTPConnection -# HTTPConnection.proxies = {"http": "http://127.0.0.1:8080"} - - def upgrade_tls(url: GridURL, response: Response) -> GridURL: try: if response.url.startswith("https://") and url.protocol == "http": @@ -121,6 +117,7 @@ def forward_message_to_proxy( API_PATH = "/api/v2" DEFAULT_PYGRID_PORT = 80 DEFAULT_PYGRID_ADDRESS = f"http://localhost:{DEFAULT_PYGRID_PORT}" +INTERNAL_PROXY_TO_RATHOLE = "http://proxy:80/rtunnel/" class Routes(Enum): @@ -133,18 +130,7 @@ class Routes(Enum): STREAM = f"{API_PATH}/stream" -@serializable(attrs=["proxy_target_uid", "url"]) -class HTTPConnectionV2(NodeConnection): - __canonical_name__ = "HTTPConnection" - __version__ = SYFT_OBJECT_VERSION_2 - - url: GridURL - proxy_target_uid: UID | None = None - routes: type[Routes] = Routes - session_cache: Session | None = None - - -@serializable(attrs=["proxy_target_uid", "url"]) +@serializable(attrs=["proxy_target_uid", "url", "rtunnel_token"]) class HTTPConnection(NodeConnection): __canonical_name__ = "HTTPConnection" __version__ = SYFT_OBJECT_VERSION_3 @@ -154,6 +140,7 @@ class HTTPConnection(NodeConnection): routes: type[Routes] = Routes session_cache: Session | None = None headers: dict[str, str] | None = None + rtunnel_token: str | None = None @field_validator("url", mode="before") @classmethod @@ -168,7 +155,11 @@ def set_headers(self, headers: dict[str, str]) -> None: self.headers = headers def with_proxy(self, proxy_target_uid: UID) -> Self: - return HTTPConnection(url=self.url, proxy_target_uid=proxy_target_uid) + return HTTPConnection( + url=self.url, + proxy_target_uid=proxy_target_uid, + rtunnel_token=self.rtunnel_token, + ) def stream_via(self, proxy_uid: UID, url_path: str) -> GridURL: # Update the presigned url path to @@ -201,16 
+192,28 @@ def session(self) -> Session: self.session_cache = session return self.session_cache - def _make_get(self, path: str, params: dict | None = None) -> bytes: + def _make_get( + self, path: str, params: dict | None = None, stream: bool = False + ) -> bytes | Iterable: if params is None: - return self._make_get_no_params(path) - url = self.url.with_path(path) + return self._make_get_no_params(path, stream=stream) + + url = self.url + + if self.rtunnel_token: + self.headers = {} if self.headers is None else self.headers + url = GridURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + response = self.session.get( str(url), headers=self.headers, verify=verify_tls(), proxies={}, params=params, + stream=stream, ) if response.status_code != 200: raise requests.ConnectionError( @@ -223,14 +226,22 @@ def _make_get(self, path: str, params: dict | None = None) -> bytes: return response.content @cached(cache=TTLCache(maxsize=128, ttl=300)) - def _make_get_no_params(self, path: str) -> bytes: - print(path) - url = self.url.with_path(path) + def _make_get_no_params(self, path: str, stream: bool = False) -> bytes | Iterable: + url = self.url + + if self.rtunnel_token: + self.headers = {} if self.headers is None else self.headers + url = GridURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + response = self.session.get( str(url), headers=self.headers, verify=verify_tls(), proxies={}, + stream=stream, ) if response.status_code != 200: raise requests.ConnectionError( @@ -240,15 +251,54 @@ def _make_get_no_params(self, path: str) -> bytes: # upgrade to tls if available self.url = upgrade_tls(self.url, response) + if stream: + return response.iter_content(chunk_size=None) + return response.content + def _make_put( + self, path: str, data: bytes | Generator, stream: bool = False + ) -> Response: + url = self.url + + if self.rtunnel_token: + url = 
GridURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) + response = self.session.put( + str(url), + verify=verify_tls(), + proxies={}, + data=data, + headers=self.headers, + stream=stream, + ) + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch {url}. Response returned with code {response.status_code}" + ) + + # upgrade to tls if available + self.url = upgrade_tls(self.url, response) + + return response + def _make_post( self, path: str, json: dict[str, Any] | None = None, data: bytes | None = None, ) -> bytes: - url = self.url.with_path(path) + url = self.url + + if self.rtunnel_token: + url = GridURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + + url = url.with_path(path) response = self.session.post( str(url), headers=self.headers, @@ -357,8 +407,17 @@ def register(self, new_user: UserCreate) -> SyftSigningKey: def make_call(self, signed_call: SignedSyftAPICall) -> Any | SyftError: msg_bytes: bytes = _serialize(obj=signed_call, to_bytes=True) + + if self.rtunnel_token: + api_url = GridURL.from_url(INTERNAL_PROXY_TO_RATHOLE) + api_url = api_url.with_path(self.routes.ROUTE_API_CALL.value) + self.headers = {} if self.headers is None else self.headers + self.headers["Host"] = self.url.host_or_ip + else: + api_url = self.api_url + response = requests.post( # nosec - url=str(self.api_url), + url=api_url, data=msg_bytes, headers=self.headers, ) @@ -403,7 +462,7 @@ def get_client_type(self) -> type[SyftClient] | SyftError: @serializable() class PythonConnection(NodeConnection): __canonical_name__ = "PythonConnection" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_3 node: AbstractNode proxy_target_uid: UID | None = None @@ -697,7 +756,10 @@ def guest(self) -> Self: ) def exchange_route( 
- self, client: Self, protocol: SyftProtocol = SyftProtocol.HTTP + self, + client: Self, + protocol: SyftProtocol = SyftProtocol.HTTP, + reverse_tunnel: bool = False, ) -> SyftSuccess | SyftError: # relative from ..service.network.routes import connection_to_route @@ -712,6 +774,7 @@ def exchange_route( self_node_route=self_node_route, remote_node_route=remote_node_route, remote_node_verify_key=client.metadata.to(NodeMetadata).verify_key, + reverse_tunnel=reverse_tunnel, ) else: raise ValueError( diff --git a/packages/syft/src/syft/client/connection.py b/packages/syft/src/syft/client/connection.py index e82db863e8a..0899532818a 100644 --- a/packages/syft/src/syft/client/connection.py +++ b/packages/syft/src/syft/client/connection.py @@ -2,13 +2,16 @@ from typing import Any # relative -from ..types.syft_object import SYFT_OBJECT_VERSION_2 +from ..types.syft_object import SYFT_OBJECT_VERSION_3 from ..types.syft_object import SyftObject +from ..types.uid import UID class NodeConnection(SyftObject): __canonical_name__ = "NodeConnection" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_3 + + id: UID | None = None # type: ignore def get_cache_key(self) -> str: raise NotImplementedError diff --git a/packages/syft/src/syft/client/domain_client.py b/packages/syft/src/syft/client/domain_client.py index cc39ac05e49..c131e5beab5 100644 --- a/packages/syft/src/syft/client/domain_client.py +++ b/packages/syft/src/syft/client/domain_client.py @@ -288,6 +288,7 @@ def connect_to_gateway( email: str | None = None, password: str | None = None, protocol: str | SyftProtocol = SyftProtocol.HTTP, + reverse_tunnel: bool = False, ) -> SyftSuccess | SyftError | None: if isinstance(protocol, str): protocol = SyftProtocol(protocol) @@ -305,7 +306,11 @@ def connect_to_gateway( if isinstance(client, SyftError): return client - res = self.exchange_route(client, protocol=protocol) + res = self.exchange_route( + client, + protocol=protocol, + reverse_tunnel=reverse_tunnel, 
+ ) if isinstance(res, SyftSuccess): if self.metadata: return SyftSuccess( diff --git a/packages/syft/src/syft/custom_worker/k8s.py b/packages/syft/src/syft/custom_worker/k8s.py index 7f76cb94337..c3d047a0daa 100644 --- a/packages/syft/src/syft/custom_worker/k8s.py +++ b/packages/syft/src/syft/custom_worker/k8s.py @@ -9,8 +9,10 @@ # third party import kr8s from kr8s.objects import APIObject +from kr8s.objects import ConfigMap from kr8s.objects import Pod from kr8s.objects import Secret +from kr8s.objects import Service from pydantic import BaseModel from typing_extensions import Self @@ -168,6 +170,25 @@ def b64encode_secret(data: str) -> str: """Convert the data to base64 encoded string for Secret.""" return base64.b64encode(data.encode()).decode() + @staticmethod + def get_configmap(client: kr8s.Api, name: str) -> ConfigMap | None: + config_map = client.get("configmaps", name) + return config_map[0] if config_map else None + + @staticmethod + def get_service(client: kr8s.Api, name: str) -> Service | None: + service = client.get("services", name) + return service[0] if service else None + + @staticmethod + def update_configmap( + config_map: ConfigMap, + patch: dict, + ) -> None: + existing_data = config_map.raw + existing_data.update(patch) + config_map.patch(patch=existing_data) + @staticmethod def create_dockerconfig_secret( secret_name: str, diff --git a/packages/syft/src/syft/node/node.py b/packages/syft/src/syft/node/node.py index ea9ab3f8a47..04851c2b17f 100644 --- a/packages/syft/src/syft/node/node.py +++ b/packages/syft/src/syft/node/node.py @@ -121,6 +121,7 @@ from ..store.blob_storage import BlobStorageConfig from ..store.blob_storage.on_disk import OnDiskBlobStorageClientConfig from ..store.blob_storage.on_disk import OnDiskBlobStorageConfig +from ..store.blob_storage.seaweedfs import SeaweedFSBlobDeposit from ..store.dict_document_store import DictStoreConfig from ..store.document_store import StoreConfig from ..store.linked_obj import LinkedObject @@ 
-1184,7 +1185,7 @@ def forward_message( # relative from ..store.blob_storage import BlobRetrievalByURL - if isinstance(result, BlobRetrievalByURL): + if isinstance(result, BlobRetrievalByURL | SeaweedFSBlobDeposit): result.proxy_node_uid = peer.id return result diff --git a/packages/syft/src/syft/node/routes.py b/packages/syft/src/syft/node/routes.py index 8be45245190..37baaff90e8 100644 --- a/packages/syft/src/syft/node/routes.py +++ b/packages/syft/src/syft/node/routes.py @@ -1,6 +1,7 @@ # stdlib import base64 import binascii +from collections.abc import AsyncGenerator import logging from typing import Annotated @@ -18,6 +19,7 @@ # relative from ..abstract_node import AbstractNode +from ..client.connection import NodeConnection from ..protocol.data_protocol import PROTOCOL_TYPE from ..serde.deserialize import _deserialize as deserialize from ..serde.serialize import _serialize as serialize @@ -52,7 +54,7 @@ def make_routes(worker: Worker) -> APIRouter: async def get_body(request: Request) -> bytes: return await request.body() - def _blob_url(peer_uid: UID, presigned_url: str) -> str: + def _get_node_connection(peer_uid: UID) -> NodeConnection: # relative from ..service.network.node_peer import route_to_connection @@ -60,12 +62,10 @@ def _blob_url(peer_uid: UID, presigned_url: str) -> str: peer = network_service.stash.get_by_uid(worker.verify_key, peer_uid).ok() peer_node_route = peer.pick_highest_priority_route() connection = route_to_connection(route=peer_node_route) - url = connection.to_blob_route(presigned_url) - - return str(url) + return connection @router.get("/stream/{peer_uid}/{url_path}/", name="stream") - async def stream(peer_uid: str, url_path: str) -> StreamingResponse: + async def stream_download(peer_uid: str, url_path: str) -> StreamingResponse: try: url_path_parsed = base64.urlsafe_b64decode(url_path.encode()).decode() except binascii.Error: @@ -73,16 +73,45 @@ async def stream(peer_uid: str, url_path: str) -> StreamingResponse: peer_uid_parsed 
= UID.from_string(peer_uid) - url = _blob_url(peer_uid=peer_uid_parsed, presigned_url=url_path_parsed) - try: - resp = requests.get(url=url, stream=True) # nosec - resp.raise_for_status() + peer_connection = _get_node_connection(peer_uid_parsed) + url = peer_connection.to_blob_route(url_path_parsed) + stream_response = peer_connection._make_get(url.path, stream=True) except requests.RequestException: raise HTTPException(404, "Failed to retrieve data from domain.") - return StreamingResponse( - resp.iter_content(chunk_size=None), media_type="text/event-stream" + return StreamingResponse(stream_response, media_type="text/event-stream") + + async def read_request_body_in_chunks( + request: Request, + ) -> AsyncGenerator[bytes, None]: + async for chunk in request.stream(): + yield chunk + + @router.put("/stream/{peer_uid}/{url_path}/", name="stream") + async def stream_upload(peer_uid: str, url_path: str, request: Request) -> Response: + try: + url_path_parsed = base64.urlsafe_b64decode(url_path.encode()).decode() + except binascii.Error: + raise HTTPException(404, "Invalid `url_path`.") + + data = await request.body() + + peer_uid_parsed = UID.from_string(peer_uid) + + try: + peer_connection = _get_node_connection(peer_uid_parsed) + url = peer_connection.to_blob_route(url_path_parsed) + + print("Url on stream", url.path) + response = peer_connection._make_put(url.path, data=data, stream=True) + except requests.RequestException: + raise HTTPException(404, "Failed to upload data to domain") + + return Response( + content=response.content, + headers=response.headers, + media_type="application/octet-stream", ) @router.get( diff --git a/packages/syft/src/syft/protocol/protocol_version.json b/packages/syft/src/syft/protocol/protocol_version.json index 5054b847ee4..c08ea39b7b2 100644 --- a/packages/syft/src/syft/protocol/protocol_version.json +++ b/packages/syft/src/syft/protocol/protocol_version.json @@ -274,9 +274,14 @@ } }, "HTTPConnection": { + "2": { + "version": 2, + 
"hash": "68409295f8916ceb22a8cf4abf89f5e4bcff0d75dc37e16ede37250ada28df59", + "action": "remove" + }, "3": { "version": 3, - "hash": "54b452bb4ab76691ac1e704b62e7bcec740850fea00805145259b37973ecd0f4", + "hash": "cac31ba98bdcc42c0717555a0918d0c8aef0d2235f892a2d86dceff09930fb88", "action": "add" } }, @@ -373,6 +378,54 @@ "hash": "1b9bd1d3d096abab5617c2ff597b4c80751f686d16482a2cff4efd8741b84d53", "action": "add" } + }, + "PythonConnection": { + "2": { + "version": 2, + "hash": "eb479c671fc112b2acbedb88bc5624dfdc9592856c04c22c66410f6c863e1708", + "action": "remove" + }, + "3": { + "version": 3, + "hash": "1084c85a59c0436592530b5fe9afc2394088c8d16faef2b19fdb9fb83ff0f0e2", + "action": "add" + } + }, + "HTTPNodeRoute": { + "2": { + "version": 2, + "hash": "2134ea812f7c6ea41522727ae087245c4b1195ffbad554db638070861cd9eb1c", + "action": "remove" + }, + "3": { + "version": 3, + "hash": "9e7e3700a2f7b1a67f054efbcb31edc71bbf358c469c85ed7760b81233803bac", + "action": "add" + } + }, + "PythonNodeRoute": { + "2": { + "version": 2, + "hash": "3eca5767ae4a8fbe67744509e58c6d9fb78f38fa0a0f7fcf5960ab4250acc1f0", + "action": "remove" + }, + "3": { + "version": 3, + "hash": "1bc413ec7c1d498ec945878e21e00affd9bd6d53b564b1e10e52feb09f177d04", + "action": "add" + } + }, + "SeaweedFSBlobDeposit": { + "3": { + "version": 3, + "hash": "05e61e6328b085b738e5d41c0781d87852d44d218894cb3008f5be46e337f6d8", + "action": "remove" + }, + "4": { + "version": 4, + "hash": "f475543ed5e0066ca09c0dfd8c903e276d4974519e9958473d8141f8d446c881", + "action": "add" + } } } } diff --git a/packages/syft/src/syft/service/network/association_request.py b/packages/syft/src/syft/service/network/association_request.py index 94f99695392..a13dd5085d3 100644 --- a/packages/syft/src/syft/service/network/association_request.py +++ b/packages/syft/src/syft/service/network/association_request.py @@ -1,5 +1,6 @@ # stdlib import secrets +from typing import cast # third party from result import Err @@ -32,6 +33,16 @@ class 
AssociationRequestChange(Change): def _run( self, context: ChangeContext, apply: bool ) -> Result[tuple[bytes, NodePeer], SyftError]: + """ + Executes the association request. + + Args: + context (ChangeContext): The change context. + apply (bool): A flag indicating whether to apply the association request. + + Returns: + Result[tuple[bytes, NodePeer], SyftError]: The result of the association request. + """ # relative from .network_service import NetworkService @@ -41,43 +52,63 @@ def _run( SyftError(message="Undo not supported for AssociationRequestChange") ) + # Get the network service service_ctx = context.to_service_ctx() + network_service = cast( + NetworkService, service_ctx.node.get_service(NetworkService) + ) + network_stash = network_service.stash - try: - remote_client: SyftClient = self.remote_peer.client_with_context( - context=service_ctx + # Check if remote peer to be added is via reverse tunnel + rtunnel_route = self.remote_peer.get_rtunnel_route() + add_rtunnel_route = ( + rtunnel_route is not None + and self.remote_peer.latest_added_route == rtunnel_route + ) + + # If the remote peer is added via reverse tunnel, we skip ping to peer + if add_rtunnel_route: + network_service.set_reverse_tunnel_config( + context=context, + remote_node_peer=self.remote_peer, ) - if remote_client.is_err(): - return SyftError( - message=f"Failed to create remote client for peer: " - f"{self.remote_peer.id}. 
Error: {remote_client.err()}" + else: + # Pinging the remote peer to verify the connection + try: + remote_client: SyftClient = self.remote_peer.client_with_context( + context=service_ctx ) - remote_client = remote_client.ok() - random_challenge = secrets.token_bytes(16) - remote_res = remote_client.api.services.network.ping( - challenge=random_challenge - ) - except Exception as e: - return SyftError(message="Remote Peer cannot ping peer:" + str(e)) - - if isinstance(remote_res, SyftError): - return Err(remote_res) + if remote_client.is_err(): + return SyftError( + message=f"Failed to create remote client for peer: " + f"{self.remote_peer.id}. Error: {remote_client.err()}" + ) + remote_client = remote_client.ok() + random_challenge = secrets.token_bytes(16) + remote_res = remote_client.api.services.network.ping( + challenge=random_challenge + ) + except Exception as e: + return SyftError(message="Remote Peer cannot ping peer:" + str(e)) - challenge_signature = remote_res + if isinstance(remote_res, SyftError): + return Err(remote_res) - # Verifying if the challenge is valid - try: - self.remote_peer.verify_key.verify_key.verify( - random_challenge, challenge_signature - ) - except Exception as e: - return Err(SyftError(message=str(e))) + challenge_signature = remote_res - network_stash = service_ctx.node.get_service(NetworkService).stash + # Verifying if the challenge is valid + try: + self.remote_peer.verify_key.verify_key.verify( + random_challenge, challenge_signature + ) + except Exception as e: + return Err(SyftError(message=str(e))) + # Adding the remote peer to the network stash result = network_stash.create_or_update_peer( service_ctx.node.verify_key, self.remote_peer ) + if result.is_err(): return Err(SyftError(message=str(result.err()))) diff --git a/packages/syft/src/syft/service/network/network_service.py b/packages/syft/src/syft/service/network/network_service.py index c32374ade31..35de9bfaf91 100644 --- 
a/packages/syft/src/syft/service/network/network_service.py +++ b/packages/syft/src/syft/service/network/network_service.py @@ -4,6 +4,7 @@ import logging import secrets from typing import Any +from typing import cast # third party from result import Result @@ -30,7 +31,10 @@ from ...types.transforms import transform_method from ...types.uid import UID from ...util.telemetry import instrument +from ...util.util import generate_token +from ...util.util import get_env from ...util.util import prompt_warning_message +from ...util.util import str_to_bool from ..context import AuthedServiceContext from ..data_subject.data_subject import NamePartitionKey from ..metadata.node_metadata import NodeMetadata @@ -51,6 +55,7 @@ from .association_request import AssociationRequestChange from .node_peer import NodePeer from .node_peer import NodePeerUpdate +from .reverse_tunnel_service import ReverseTunnelService from .routes import HTTPNodeRoute from .routes import NodeRoute from .routes import NodeRouteType @@ -62,6 +67,12 @@ NodeTypePartitionKey = PartitionKey(key="node_type", type_=NodeType) OrderByNamePartitionKey = PartitionKey(key="name", type_=str) +REVERSE_TUNNEL_ENABLED = "REVERSE_TUNNEL_ENABLED" + + +def reverse_tunnel_enabled() -> bool: + return str_to_bool(get_env(REVERSE_TUNNEL_ENABLED, "false")) + @serializable() class NodePeerAssociationStatus(Enum): @@ -116,19 +127,19 @@ def create_or_update_peer( valid = self.check_type(peer, NodePeer) if valid.is_err(): return SyftError(message=valid.err()) - - existing = self.get_by_uid(credentials=credentials, uid=peer.id) - if existing.is_ok() and existing.ok() is not None: - existing_peer: NodePeer = existing.ok() + existing: Result | NodePeer = self.get_by_uid( + credentials=credentials, uid=peer.id + ) + if existing.is_ok() and existing.ok(): + existing_peer = existing.ok() existing_peer.update_routes(peer.node_routes) peer_update = NodePeerUpdate( id=peer.id, node_routes=existing_peer.node_routes ) result = 
self.update(credentials, peer_update) - return result else: result = self.set(credentials, peer) - return result + return result def get_by_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey @@ -154,6 +165,8 @@ class NetworkService(AbstractService): def __init__(self, store: DocumentStore) -> None: self.store = store self.stash = NetworkStash(store=store) + if reverse_tunnel_enabled(): + self.rtunnel_service = ReverseTunnelService() @service_method( path="network.exchange_credentials_with", @@ -167,6 +180,7 @@ def exchange_credentials_with( self_node_route: NodeRoute, remote_node_route: NodeRoute, remote_node_verify_key: SyftVerifyKey, + reverse_tunnel: bool = False, ) -> Request | SyftSuccess | SyftError: """ Exchange Route With Another Node. If there is a pending association request, return it @@ -175,6 +189,14 @@ def exchange_credentials_with( # Step 1: Validate the Route self_node_peer = self_node_route.validate_with_context(context=context) + if reverse_tunnel and not reverse_tunnel_enabled(): + return SyftError(message="Reverse tunneling is not enabled on this node.") + elif reverse_tunnel: + _rtunnel_route = self_node_peer.node_routes[-1] + _rtunnel_route.rtunnel_token = generate_token() + _rtunnel_route.host_or_ip = f"{self_node_peer.name}.syft.local" + self_node_peer.node_routes[-1] = _rtunnel_route + if isinstance(self_node_peer, SyftError): return self_node_peer @@ -186,91 +208,65 @@ def exchange_credentials_with( ) remote_node_peer = NodePeer.from_client(remote_client) - # check locally if the remote node already exists as a peer - existing_peer_result = self.stash.get_by_uid( - context.node.verify_key, remote_node_peer.id + # Step 3: Check remotely if the self node already exists as a peer + # Update the peer if it exists, otherwise add it + remote_self_node_peer = remote_client.api.services.network.get_peer_by_name( + name=self_node_peer.name ) - if ( - existing_peer_result.is_ok() - and (existing_peer := 
existing_peer_result.ok()) is not None - ): - logger.info( - f"{remote_node_peer.node_type} '{remote_node_peer.name}' already exist as a peer for " - f"{self_node_peer.node_type} '{self_node_peer.name}'." - ) - - if existing_peer != remote_node_peer: - result = self.stash.create_or_update_peer( - context.node.verify_key, - remote_node_peer, - ) - if result.is_err(): - return SyftError( - message=f"Failed to update peer: {remote_node_peer.name} information." - ) - logger.info( - f"{existing_peer.node_type} peer '{existing_peer.name}' information successfully updated." - ) - # Also check remotely if the self node already exists as a peer - remote_self_node_peer = remote_client.api.services.network.get_peer_by_name( - name=self_node_peer.name + association_request_approved = True + if isinstance(remote_self_node_peer, NodePeer): + updated_peer = NodePeerUpdate( + id=self_node_peer.id, node_routes=self_node_peer.node_routes ) - if isinstance(remote_self_node_peer, NodePeer): - logger.info( - f"{self_node_peer.node_type} '{self_node_peer.name}' already exist " - f"as a peer for {remote_node_peer.node_type} '{remote_node_peer.name}'." + result = remote_client.api.services.network.update_peer( + peer_update=updated_peer + ) + if isinstance(result, SyftError): + logger.error( + f"Failed to update peer information on remote client. {result.message}" ) - if remote_self_node_peer != self_node_peer: - updated_peer = NodePeerUpdate( - id=self_node_peer.id, node_routes=self_node_peer.node_routes - ) - result = remote_client.api.services.network.update_peer( - peer_update=updated_peer - ) - logger.info( - f"{self_node_peer.node_type} peer '{self_node_peer.name}' information change detected." - ) - if isinstance(result, SyftError): - logger.error( - f"Attempt to remotely update {self_node_peer.node_type} peer " - f"'{self_node_peer.name}' information remotely failed. 
Error: {result.message}" - ) - return SyftError(message="Failed to update peer information.") - - logger.info( - f"{self_node_peer.node_type} peer '{self_node_peer.name}' " - f"information successfully updated." - ) - msg = ( - f"Routes between {remote_node_peer.node_type} '{remote_node_peer.name}' and " - f"{self_node_peer.node_type} '{self_node_peer.name}' already exchanged." + return SyftError( + message=f"Failed to add peer information on remote client : {remote_client.id}" ) - return SyftSuccess(message="\n".join(msg)) # If peer does not exist, ask the remote client to add this node # (represented by `self_node_peer`) as a peer - random_challenge = secrets.token_bytes(16) - remote_res = remote_client.api.services.network.add_peer( - peer=self_node_peer, - challenge=random_challenge, - self_node_route=remote_node_route, - verify_key=remote_node_verify_key, - ) + if remote_self_node_peer is None: + random_challenge = secrets.token_bytes(16) + remote_res = remote_client.api.services.network.add_peer( + peer=self_node_peer, + challenge=random_challenge, + self_node_route=remote_node_route, + verify_key=remote_node_verify_key, + ) - if isinstance(remote_res, SyftError): - return remote_res + if isinstance(remote_res, SyftError): + return SyftError( + message=f"Failed to add peer to remote client: {remote_client.id}. Error: {remote_res.message}" + ) - association_request_approved = not isinstance(remote_res, Request) + association_request_approved = not isinstance(remote_res, Request) - # save the remote peer for later + # Step 4: Save the remote peer for later result = self.stash.create_or_update_peer( context.node.verify_key, remote_node_peer, ) if result.is_err(): + logging.error( + f"Failed to save peer: {remote_node_peer}. 
Error: {result.err()}" + ) return SyftError(message="Failed to update route information.") + # Step 5: Save config to enable reverse tunneling + if reverse_tunnel and reverse_tunnel_enabled(): + self.set_reverse_tunnel_config( + context=context, + self_node_peer=self_node_peer, + remote_node_peer=remote_node_peer, + ) + return ( SyftSuccess(message="Routes Exchanged") if association_request_approved @@ -486,10 +482,44 @@ def update_peer( return SyftError( message=f"Failed to update peer '{peer_update.name}'. Error: {result.err()}" ) + + peer = result.ok() + + self.set_reverse_tunnel_config(context=context, remote_node_peer=peer) return SyftSuccess( message=f"Peer '{result.ok().name}' information successfully updated." ) + def set_reverse_tunnel_config( + self, + context: AuthedServiceContext, + remote_node_peer: NodePeer, + self_node_peer: NodePeer | None = None, + ) -> None: + node_type = cast(NodeType, context.node.node_type) + if node_type.value == NodeType.GATEWAY.value: + rtunnel_route = remote_node_peer.get_rtunnel_route() + ( + self.rtunnel_service.set_server_config(remote_node_peer) + if rtunnel_route + else None + ) + else: + self_node_peer = ( + context.node.settings.to(NodePeer) + if self_node_peer is None + else self_node_peer + ) + rtunnel_route = self_node_peer.get_rtunnel_route() + ( + self.rtunnel_service.set_client_config( + self_node_peer=self_node_peer, + remote_node_route=remote_node_peer.pick_highest_priority_route(), + ) + if rtunnel_route + else None + ) + @service_method( path="network.delete_peer_by_id", name="delete_peer_by_id", @@ -499,6 +529,24 @@ def delete_peer_by_id( self, context: AuthedServiceContext, uid: UID ) -> SyftSuccess | SyftError: """Delete Node Peer""" + retrieve_result = self.stash.get_by_uid(context.credentials, uid) + if err := retrieve_result.is_err(): + return SyftError( + message=f"Failed to retrieve peer with UID {uid}: {retrieve_result.err()}." 
+ ) + peer_to_delete = cast(NodePeer, retrieve_result.ok()) + + node_side_type = cast(NodeType, context.node.node_type) + if node_side_type.value == NodeType.GATEWAY.value: + rtunnel_route = peer_to_delete.get_rtunnel_route() + ( + self.rtunnel_service.clear_server_config(peer_to_delete) + if rtunnel_route + else None + ) + + # TODO: Handle the case when peer is deleted from domain node + result = self.stash.delete_by_uid(context.credentials, uid) if err := result.is_err(): return SyftError(message=f"Failed to delete peer with UID {uid}: {err}.") @@ -588,12 +636,13 @@ def add_route( if isinstance(remote_node_peer, SyftError): return remote_node_peer # add and update the priority for the peer - existed_route: NodeRoute | None = remote_node_peer.update_route(route) - if existed_route: + if route in remote_node_peer.node_routes: return SyftSuccess( message=f"The route already exists between '{context.node.name}' and " - f"peer '{remote_node_peer.name}' with id '{existed_route.id}'." + f"peer '{remote_node_peer.name}'." ) + + remote_node_peer.update_route(route=route) # update the peer in the store with the updated routes peer_update = NodePeerUpdate( id=remote_node_peer.id, node_routes=remote_node_peer.node_routes @@ -615,8 +664,7 @@ def delete_route_on_peer( self, context: AuthedServiceContext, peer: NodePeer, - route: NodeRoute | None = None, - route_id: UID | None = None, + route: NodeRoute, ) -> SyftSuccess | SyftError | SyftInfo: """ Delete the route on the remote peer. @@ -625,7 +673,6 @@ def delete_route_on_peer( context (AuthedServiceContext): The authentication context for the service. peer (NodePeer): The peer for which the route will be deleted. route (NodeRoute): The route to be deleted. - route_id (UID): The UID of the route to be deleted. Returns: SyftSuccess: If the route is successfully deleted. 
@@ -633,17 +680,6 @@ def delete_route_on_peer( SyftInfo: If there is only one route left for the peer and the admin chose not to remove it """ - if route is None and route_id is None: - return SyftError( - message="Either `route` or `route_id` arg must be provided" - ) - - if route and route_id and route.id != route_id: - return SyftError( - message=f"Both `route` and `route_id` are provided, but " - f"route's id ({route.id}) and route_id ({route_id}) do not match" - ) - # creates a client on the remote node based on the credentials # of the current node's client remote_client = peer.client_with_context(context=context) @@ -657,7 +693,6 @@ def delete_route_on_peer( result = remote_client.api.services.network.delete_route( peer_verify_key=context.credentials, route=route, - route_id=route_id, called_by_peer=True, ) return result @@ -670,7 +705,6 @@ def delete_route( context: AuthedServiceContext, peer_verify_key: SyftVerifyKey, route: NodeRoute | None = None, - route_id: UID | None = None, called_by_peer: bool = False, ) -> SyftSuccess | SyftError | SyftInfo: """ @@ -682,7 +716,6 @@ def delete_route( context (AuthedServiceContext): The authentication context for the service. peer_verify_key (SyftVerifyKey): The verify key of the remote node peer. route (NodeRoute): The route to be deleted. - route_id (UID): The UID of the route to be deleted. called_by_peer (bool): The flag to indicate that it's called by a remote peer. Returns: @@ -724,20 +757,12 @@ def delete_route( f"'{remote_node_peer.node_routes[0].id}' was not deleted." ) - if route: - result = remote_node_peer.delete_route(route=route) - return_message = ( - f"Route '{str(route)}' with id '{route.id}' to peer " - f"{remote_node_peer.node_type.value} '{remote_node_peer.name}' " - f"was deleted for {str(context.node.node_type)} '{context.node.name}'." 
- ) - if route_id: - result = remote_node_peer.delete_route(route_id=route_id) - return_message = ( - f"Route with id '{route_id}' to peer " - f"{remote_node_peer.node_type.value} '{remote_node_peer.name}' " - f"was deleted for {str(context.node.node_type)} '{context.node.name}'." - ) + result = remote_node_peer.delete_route(route=route) + return_message = ( + f"Route '{str(route)}' to peer " + f"{remote_node_peer.node_type.value} '{remote_node_peer.name}' " + f"was deleted for {str(context.node.node_type)} '{context.node.name}'." + ) if isinstance(result, SyftError): return result @@ -888,7 +913,7 @@ def _get_remote_node_peer_by_verify_key( remote_node_peer = remote_node_peer.ok() if remote_node_peer is None: return SyftError( - message=f"Can't retrive {remote_node_peer.name} from the store of peers (None)." + message=f"Can't retrieve {remote_node_peer.name} from the store of peers (None)." ) return remote_node_peer @@ -930,6 +955,7 @@ def from_grid_url(context: TransformContext) -> TransformContext: context.output["private"] = False context.output["proxy_target_uid"] = context.obj.proxy_target_uid context.output["priority"] = 1 + context.output["rtunnel_token"] = context.obj.rtunnel_token return context @@ -966,7 +992,11 @@ def node_route_to_http_connection( url = GridURL( protocol=obj.protocol, host_or_ip=obj.host_or_ip, port=obj.port ).as_container_host() - return HTTPConnection(url=url, proxy_target_uid=obj.proxy_target_uid) + return HTTPConnection( + url=url, + proxy_target_uid=obj.proxy_target_uid, + rtunnel_token=obj.rtunnel_token, + ) @transform(NodeMetadata, NodePeer) diff --git a/packages/syft/src/syft/service/network/node_peer.py b/packages/syft/src/syft/service/network/node_peer.py index 5835cf7aa9e..23c1ffc7057 100644 --- a/packages/syft/src/syft/service/network/node_peer.py +++ b/packages/syft/src/syft/service/network/node_peer.py @@ -91,22 +91,17 @@ class NodePeer(SyftObject): ping_status_message: str | None = None pinged_timestamp: DateTime | None 
= None - def existed_route( - self, route: NodeRouteType | None = None, route_id: UID | None = None - ) -> tuple[bool, int | None]: + def existed_route(self, route: NodeRouteType) -> tuple[bool, int | None]: """Check if a route exists in self.node_routes Args: route: the route to be checked. For now it can be either - HTTPNodeRoute or PythonNodeRoute or VeilidNodeRoute - route_id: the id of the route to be checked + HTTPNodeRoute or PythonNodeRoute Returns: if the route exists, returns (True, index of the existed route in self.node_routes) if the route does not exist returns (False, None) """ - if route_id is None and route is None: - raise ValueError("Either route or route_id should be provided in args") if route: if not isinstance(route, HTTPNodeRoute | PythonNodeRoute | VeilidNodeRoute): @@ -115,11 +110,6 @@ def existed_route( if route == r: return (True, i) - elif route_id: - for i, r in enumerate(self.node_routes): - if r.id == route_id: - return (True, i) - return (False, None) def update_route_priority(self, route: NodeRoute) -> NodeRoute: @@ -161,25 +151,21 @@ def pick_highest_priority_route(self, oldest: bool = True) -> NodeRoute: highest_priority_route = route return highest_priority_route - def update_route(self, route: NodeRoute) -> NodeRoute | None: + def update_route(self, route: NodeRoute) -> None: """ Update the route for the node. If the route already exists, return it. If the route is new, assign it to have the priority of (current_max + 1) Args: - route (NodeRoute): The new route to be added to the peer's node route list - - Returns: - NodeRoute | None: if the route already exists, return it, else returns None + route (NodeRoute): The new route to be added to the peer. 
""" - existed, _ = self.existed_route(route) + existed, idx = self.existed_route(route) if existed: - return route + self.node_routes[idx] = route # type: ignore else: new_route = self.update_route_priority(route) self.node_routes.append(new_route) - return None def update_routes(self, new_routes: list[NodeRoute]) -> None: """ @@ -220,7 +206,7 @@ def update_existed_route_priority( message="Priority must be greater than 0. Now it is {priority}." ) - existed, index = self.existed_route(route_id=route.id) + existed, index = self.existed_route(route=route) if not existed or index is None: return SyftError(message=f"Route with id {route.id} does not exist.") @@ -244,6 +230,16 @@ def from_client(client: SyftClient) -> "NodePeer": peer.node_routes.append(route) return peer + @property + def latest_added_route(self) -> NodeRoute | None: + """ + Returns the latest added route from the list of node routes. + + Returns: + NodeRoute | None: The latest added route, or None if there are no routes. + """ + return self.node_routes[-1] if self.node_routes else None + def client_with_context( self, context: NodeServiceContext ) -> Result[type[SyftClient], str]: @@ -289,28 +285,23 @@ def guest_client(self) -> SyftClient: def proxy_from(self, client: SyftClient) -> SyftClient: return client.proxy_to(self) - def delete_route( - self, route: NodeRouteType | None = None, route_id: UID | None = None - ) -> SyftError | None: + def get_rtunnel_route(self) -> HTTPNodeRoute | None: + for route in self.node_routes: + if hasattr(route, "rtunnel_token") and route.rtunnel_token: + return route + return None + + def delete_route(self, route: NodeRouteType) -> SyftError | None: """ Deletes a route from the peer's route list. Takes O(n) where is n is the number of routes in self.node_routes. 
Args: route (NodeRouteType): The route to be deleted; - route_id (UID): The id of the route to be deleted; Returns: - SyftError: If deleting failed + SyftError: If failing to delete node route """ - if route_id: - try: - self.node_routes = [r for r in self.node_routes if r.id != route_id] - except Exception as e: - return SyftError( - message=f"Error deleting route with id {route_id}. Exception: {e}" - ) - if route: try: self.node_routes = [r for r in self.node_routes if r != route] diff --git a/packages/syft/src/syft/service/network/rathole.py b/packages/syft/src/syft/service/network/rathole.py new file mode 100644 index 00000000000..4fd0be445b1 --- /dev/null +++ b/packages/syft/src/syft/service/network/rathole.py @@ -0,0 +1,43 @@ +# third party +from typing_extensions import Self + +# relative +from ...serde.serializable import serializable +from ...types.base import SyftBaseModel +from ...util.util import get_env +from .node_peer import NodePeer + + +def get_rathole_port() -> int: + return int(get_env("RATHOLE_PORT", "2333")) + + +@serializable() +class RatholeConfig(SyftBaseModel): + uuid: str + secret_token: str + local_addr_host: str + local_addr_port: int + server_name: str | None = None + + @property + def local_address(self) -> str: + return f"{self.local_addr_host}:{self.local_addr_port}" + + @classmethod + def from_peer(cls, peer: NodePeer) -> Self: + # relative + from .routes import HTTPNodeRoute + + high_priority_route = peer.pick_highest_priority_route() + + if not isinstance(high_priority_route, HTTPNodeRoute): + raise ValueError("Rathole only supports HTTPNodeRoute") + + return cls( + uuid=peer.id, + secret_token=peer.rtunnel_token, + local_addr_host=high_priority_route.host_or_ip, + local_addr_port=high_priority_route.port, + server_name=peer.name, + ) diff --git a/packages/syft/src/syft/service/network/rathole_config_builder.py b/packages/syft/src/syft/service/network/rathole_config_builder.py new file mode 100644 index 00000000000..15d1e8fb7eb 
--- /dev/null +++ b/packages/syft/src/syft/service/network/rathole_config_builder.py @@ -0,0 +1,308 @@ +# stdlib +import secrets +from typing import cast + +# third party +from kr8s.objects import Service +import yaml + +# relative +from ...custom_worker.k8s import KubeUtils +from ...custom_worker.k8s import get_kr8s_client +from ...types.uid import UID +from .node_peer import NodePeer +from .rathole import RatholeConfig +from .rathole import get_rathole_port +from .rathole_toml import RatholeClientToml +from .rathole_toml import RatholeServerToml + +RATHOLE_TOML_CONFIG_MAP = "rathole-config" +RATHOLE_PROXY_CONFIG_MAP = "proxy-config-dynamic" +PROXY_CONFIG_MAP = "proxy-config" +DEFAULT_LOCAL_ADDR_HOST = "0.0.0.0" # nosec + + +class RatholeConfigBuilder: + def __init__(self) -> None: + self.k8rs_client = get_kr8s_client() + + def add_host_to_server(self, peer: NodePeer) -> None: + """Add a host to the rathole server toml file. + + Args: + peer (NodePeer): The peer to be added to the rathole server. 
+ + Returns: + None + """ + + rathole_route = peer.get_rtunnel_route() + if not rathole_route: + raise Exception(f"Peer: {peer} has no rathole route: {rathole_route}") + + random_port = self._get_random_port() + + peer_id = cast(UID, peer.id) + + config = RatholeConfig( + uuid=peer_id.to_string(), + secret_token=rathole_route.rtunnel_token, + local_addr_host=DEFAULT_LOCAL_ADDR_HOST, + local_addr_port=random_port, + server_name=peer.name, + ) + + # Get rathole toml config map + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeServerToml.filename + + toml_str = rathole_config_map.data[client_filename] + + # Add the peer info to the toml file + rathole_toml = RatholeServerToml(toml_str) + rathole_toml.add_config(config=config) + + # First time adding a peer + if not rathole_toml.get_rathole_listener_addr(): + bind_addr = f"localhost:{get_rathole_port()}" + rathole_toml.set_rathole_listener_addr(bind_addr) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + # Add the peer info to the proxy config map + self._add_dynamic_addr_to_rathole(config) + + def remove_host_from_server(self, peer_id: str, server_name: str) -> None: + """Remove a host from the rathole server toml file. + + Args: + peer_id (str): The id of the peer to be removed. + server_name (str): The name of the peer to be removed. 
+ + Returns: + None + """ + + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeServerToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeServerToml(toml_str=toml_str) + + rathole_toml.remove_config(peer_id) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + # Remove the peer info from the proxy config map + self._remove_dynamic_addr_from_rathole(server_name) + + def _get_random_port(self) -> int: + """Get a random port number.""" + return secrets.randbits(15) + + def add_host_to_client( + self, peer_name: str, peer_id: str, rtunnel_token: str, remote_addr: str + ) -> None: + """Add a host to the rathole client toml file.""" + + config = RatholeConfig( + uuid=peer_id, + secret_token=rtunnel_token, + local_addr_host="proxy", + local_addr_port=80, + server_name=peer_name, + ) + + # Get rathole toml config map + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) + + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeClientToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeClientToml(toml_str=toml_str) + + rathole_toml.add_config(config=config) + + rathole_toml.set_remote_addr(remote_addr) + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + def remove_host_from_client(self, peer_id: str) -> None: + """Remove a host from the rathole client toml file.""" + + rathole_config_map = KubeUtils.get_configmap( + client=self.k8rs_client, name=RATHOLE_TOML_CONFIG_MAP + ) 
+ + if rathole_config_map is None: + raise Exception("Rathole config map not found.") + + client_filename = RatholeClientToml.filename + + toml_str = rathole_config_map.data[client_filename] + + rathole_toml = RatholeClientToml(toml_str=toml_str) + + rathole_toml.remove_config(peer_id) + + rathole_toml.clear_remote_addr() + + data = {client_filename: rathole_toml.toml_str} + + # Update the rathole config map + KubeUtils.update_configmap(config_map=rathole_config_map, patch={"data": data}) + + def _add_dynamic_addr_to_rathole( + self, config: RatholeConfig, entrypoint: str = "web" + ) -> None: + """Add a port to the rathole proxy config map.""" + + rathole_proxy_config_map = KubeUtils.get_configmap( + self.k8rs_client, RATHOLE_PROXY_CONFIG_MAP + ) + + if rathole_proxy_config_map is None: + raise Exception("Rathole proxy config map not found.") + + rathole_proxy = rathole_proxy_config_map.data["rathole-dynamic.yml"] + + if not rathole_proxy: + rathole_proxy = {"http": {"routers": {}, "services": {}, "middlewares": {}}} + else: + rathole_proxy = yaml.safe_load(rathole_proxy) + + rathole_proxy["http"]["services"][config.server_name] = { + "loadBalancer": { + "servers": [{"url": f"http://rathole:{config.local_addr_port}"}] + } + } + + rathole_proxy["http"]["middlewares"]["strip-rathole-prefix"] = { + "replacePathRegex": {"regex": "^/rathole/(.*)", "replacement": "/$1"} + } + + proxy_rule = ( + f"Host(`{config.server_name}.syft.local`) || " + f"HostHeader(`{config.server_name}.syft.local`) && PathPrefix(`/rtunnel`)" + ) + + rathole_proxy["http"]["routers"][config.server_name] = { + "rule": proxy_rule, + "service": config.server_name, + "entryPoints": [entrypoint], + "middlewares": ["strip-rathole-prefix"], + } + + KubeUtils.update_configmap( + config_map=rathole_proxy_config_map, + patch={"data": {"rathole-dynamic.yml": yaml.safe_dump(rathole_proxy)}}, + ) + + self._expose_port_on_rathole_service(config.server_name, config.local_addr_port) + + def 
_remove_dynamic_addr_from_rathole(self, server_name: str) -> None: + """Remove a port from the rathole proxy config map.""" + + rathole_proxy_config_map = KubeUtils.get_configmap( + self.k8rs_client, RATHOLE_PROXY_CONFIG_MAP + ) + + if rathole_proxy_config_map is None: + raise Exception("Rathole proxy config map not found.") + + rathole_proxy = rathole_proxy_config_map.data["rathole-dynamic.yml"] + + if not rathole_proxy: + return + + rathole_proxy = yaml.safe_load(rathole_proxy) + + if server_name in rathole_proxy["http"]["routers"]: + del rathole_proxy["http"]["routers"][server_name] + + if server_name in rathole_proxy["http"]["services"]: + del rathole_proxy["http"]["services"][server_name] + + KubeUtils.update_configmap( + config_map=rathole_proxy_config_map, + patch={"data": {"rathole-dynamic.yml": yaml.safe_dump(rathole_proxy)}}, + ) + + self._remove_port_on_rathole_service(server_name) + + def _expose_port_on_rathole_service(self, port_name: str, port: int) -> None: + """Expose a port on the rathole service.""" + + rathole_service = KubeUtils.get_service(self.k8rs_client, "rathole") + + rathole_service = cast(Service, rathole_service) + + config = rathole_service.raw + + existing_port_idx = None + for idx, existing_port in enumerate(config["spec"]["ports"]): + if existing_port["name"] == port_name: + print("Port already exists.", existing_port_idx, port_name) + existing_port_idx = idx + break + + if existing_port_idx is not None: + config["spec"]["ports"][existing_port_idx]["port"] = port + config["spec"]["ports"][existing_port_idx]["targetPort"] = port + else: + config["spec"]["ports"].append( + { + "name": port_name, + "port": port, + "targetPort": port, + "protocol": "TCP", + } + ) + + rathole_service.patch(config) + + def _remove_port_on_rathole_service(self, port_name: str) -> None: + """Remove a port from the rathole service.""" + + rathole_service = KubeUtils.get_service(self.k8rs_client, "rathole") + + rathole_service = cast(Service, rathole_service) 
+ + config = rathole_service.raw + + ports = config["spec"]["ports"] + + for port in ports: + if port["name"] == port_name: + ports.remove(port) + break + + rathole_service.patch(config) diff --git a/packages/syft/src/syft/service/network/rathole_toml.py b/packages/syft/src/syft/service/network/rathole_toml.py new file mode 100644 index 00000000000..8ded821279e --- /dev/null +++ b/packages/syft/src/syft/service/network/rathole_toml.py @@ -0,0 +1,247 @@ +# third party +import tomli +import tomli_w + +# relative +from .rathole import RatholeConfig + + +class TomlReaderWriter: + @staticmethod + def load(toml_str: str) -> dict: + return tomli.loads(toml_str) + + @staticmethod + def dump(toml_dict: str) -> str: + return tomli_w.dumps(toml_dict) + + +class RatholeBaseToml: + filename: str + + def __init__(self, toml_str: str) -> None: + self.toml_writer = TomlReaderWriter + self.toml_str = toml_str + + def read(self) -> dict: + return self.toml_writer.load(self.toml_str) + + def save(self, toml_dict: dict) -> None: + self.toml_str = self.toml_writer.dump(toml_dict) + + def _validate(self) -> bool: + raise NotImplementedError + + @property + def is_valid(self) -> bool: + return self._validate() + + +class RatholeClientToml(RatholeBaseToml): + filename: str = "client.toml" + + def set_remote_addr(self, remote_host: str) -> None: + """Add a new remote address to the client toml file.""" + + toml = self.read() + + # Add the new remote address + if "client" not in toml: + toml["client"] = {} + + toml["client"]["remote_addr"] = remote_host + + self.save(toml) + + def clear_remote_addr(self) -> None: + """Clear the remote address from the client toml file.""" + + toml = self.read() + + # Clear the remote address + if "client" not in toml: + return + + toml["client"]["remote_addr"] = "" + + self.save(toml) + + def add_config(self, config: RatholeConfig) -> None: + """Add a new config to the toml file.""" + + toml = self.read() + + # Add the new config + if "services" not in 
toml["client"]: + toml["client"]["services"] = {} + + if config.uuid not in toml["client"]["services"]: + toml["client"]["services"][config.uuid] = {} + + toml["client"]["services"][config.uuid] = { + "token": config.secret_token, + "local_addr": config.local_address, + } + + self.save(toml) + + def remove_config(self, uuid: str) -> None: + """Remove a config from the toml file.""" + + toml = self.read() + + # Remove the config + if "services" not in toml["client"]: + return + + if uuid not in toml["client"]["services"]: + return + + del toml["client"]["services"][uuid] + + self.save(toml) + + def update_config(self, config: RatholeConfig) -> None: + """Update a config in the toml file.""" + + toml = self.read() + + # Update the config + if "services" not in toml["client"]: + return + + if config.uuid not in toml["client"]["services"]: + return + + toml["client"]["services"][config.uuid] = { + "token": config.secret_token, + "local_addr": config.local_address, + } + + self.save(toml) + + def get_config(self, uuid: str) -> RatholeConfig | None: + """Get a config from the toml file.""" + + toml = self.read() + + # Get the config + if "services" not in toml["client"]: + return None + + if uuid not in toml["client"]["services"]: + return None + + service = toml["client"]["services"][uuid] + + return RatholeConfig( + uuid=uuid, + secret_token=service["token"], + local_addr_host=service["local_addr"].split(":")[0], + local_addr_port=service["local_addr"].split(":")[1], + ) + + def _validate(self) -> bool: + toml = self.read() + + if not toml["client"]["remote_addr"]: + return False + + for uuid, config in toml["client"]["services"].items(): + if not uuid: + return False + + if not config["token"] or not config["local_addr"]: + return False + + return True + + +class RatholeServerToml(RatholeBaseToml): + filename: str = "server.toml" + + def set_rathole_listener_addr(self, bind_addr: str) -> None: + """Set the bind address in the server toml file.""" + + toml = 
self.read() + + # Set the bind address + toml["server"]["bind_addr"] = bind_addr + + self.save(toml) + + def get_rathole_listener_addr(self) -> str: + """Get the bind address from the server toml file.""" + + toml = self.read() + + return toml["server"]["bind_addr"] + + def add_config(self, config: RatholeConfig) -> None: + """Add a new config to the toml file.""" + + toml = self.read() + + # Add the new config + if "services" not in toml["server"]: + toml["server"]["services"] = {} + + if config.uuid not in toml["server"]["services"]: + toml["server"]["services"][config.uuid] = {} + + toml["server"]["services"][config.uuid] = { + "token": config.secret_token, + "bind_addr": config.local_address, + } + + self.save(toml) + + def remove_config(self, uuid: str) -> None: + """Remove a config from the toml file.""" + + toml = self.read() + + # Remove the config + if "services" not in toml["server"]: + return + + if uuid not in toml["server"]["services"]: + return + + del toml["server"]["services"][uuid] + + self.save(toml) + + def update_config(self, config: RatholeConfig) -> None: + """Update a config in the toml file.""" + + toml = self.read() + + # Update the config + if "services" not in toml["server"]: + return + + if config.uuid not in toml["server"]["services"]: + return + + toml["server"]["services"][config.uuid] = { + "token": config.secret_token, + "bind_addr": config.local_address, + } + + self.save(toml) + + def _validate(self) -> bool: + toml = self.read() + + if not toml["server"]["bind_addr"]: + return False + + for uuid, config in toml["server"]["services"].items(): + if not uuid: + return False + + if not config["token"] or not config["bind_addr"]: + return False + + return True diff --git a/packages/syft/src/syft/service/network/reverse_tunnel_service.py b/packages/syft/src/syft/service/network/reverse_tunnel_service.py new file mode 100644 index 00000000000..36d5b14e151 --- /dev/null +++ 
b/packages/syft/src/syft/service/network/reverse_tunnel_service.py @@ -0,0 +1,48 @@ +# relative +from ...types.grid_url import GridURL +from .node_peer import NodePeer +from .rathole_config_builder import RatholeConfigBuilder +from .routes import NodeRoute + + +class ReverseTunnelService: + def __init__(self) -> None: + self.builder = RatholeConfigBuilder() + + def set_client_config( + self, + self_node_peer: NodePeer, + remote_node_route: NodeRoute, + ) -> None: + rathole_route = self_node_peer.get_rtunnel_route() + if not rathole_route: + raise Exception( + "Failed to exchange routes via . " + + f"Peer: {self_node_peer} has no rathole route: {rathole_route}" + ) + + remote_url = GridURL( + host_or_ip=remote_node_route.host_or_ip, port=remote_node_route.port + ) + rathole_remote_addr = remote_url.as_container_host() + + remote_addr = rathole_remote_addr.url_no_protocol + + self.builder.add_host_to_client( + peer_name=self_node_peer.name, + peer_id=str(self_node_peer.id), + rtunnel_token=rathole_route.rtunnel_token, + remote_addr=remote_addr, + ) + + def set_server_config(self, remote_peer: NodePeer) -> None: + rathole_route = remote_peer.get_rtunnel_route() + self.builder.add_host_to_server(remote_peer) if rathole_route else None + + def clear_client_config(self, self_node_peer: NodePeer) -> None: + self.builder.remove_host_from_client(str(self_node_peer.id)) + + def clear_server_config(self, remote_peer: NodePeer) -> None: + self.builder.remove_host_from_server( + str(remote_peer.id), server_name=remote_peer.name + ) diff --git a/packages/syft/src/syft/service/network/routes.py b/packages/syft/src/syft/service/network/routes.py index f3fa9b1ad1a..ca5ea04c999 100644 --- a/packages/syft/src/syft/service/network/routes.py +++ b/packages/syft/src/syft/service/network/routes.py @@ -18,7 +18,7 @@ from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable from ...types.syft_object import SYFT_OBJECT_VERSION_1 -from 
...types.syft_object import SYFT_OBJECT_VERSION_2 +from ...types.syft_object import SYFT_OBJECT_VERSION_3 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.uid import UID @@ -87,14 +87,16 @@ def validate_with_context( @serializable() class HTTPNodeRoute(SyftObject, NodeRoute): __canonical_name__ = "HTTPNodeRoute" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_3 + id: UID | None = None # type: ignore host_or_ip: str private: bool = False protocol: str = "http" port: int = 80 proxy_target_uid: UID | None = None priority: int = 1 + rtunnel_token: str | None = None def __eq__(self, other: Any) -> bool: if not isinstance(other, HTTPNodeRoute): @@ -107,6 +109,7 @@ def __hash__(self) -> int: + hash(self.port) + hash(self.protocol) + hash(self.proxy_target_uid) + + hash(self.rtunnel_token) ) def __str__(self) -> str: @@ -116,8 +119,9 @@ def __str__(self) -> str: @serializable() class PythonNodeRoute(SyftObject, NodeRoute): __canonical_name__ = "PythonNodeRoute" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_3 + id: UID | None = None # type: ignore worker_settings: WorkerSettings proxy_target_uid: UID | None = None priority: int = 1 diff --git a/packages/syft/src/syft/store/blob_storage/seaweedfs.py b/packages/syft/src/syft/store/blob_storage/seaweedfs.py index 03c6f442c26..2d54e7c60ae 100644 --- a/packages/syft/src/syft/store/blob_storage/seaweedfs.py +++ b/packages/syft/src/syft/store/blob_storage/seaweedfs.py @@ -38,7 +38,8 @@ from ...types.blob_storage import SeaweedSecureFilePathLocation from ...types.blob_storage import SecureFilePathLocation from ...types.grid_url import GridURL -from ...types.syft_object import SYFT_OBJECT_VERSION_3 +from ...types.syft_object import SYFT_OBJECT_VERSION_4 +from ...types.uid import UID from ...util.constants import DEFAULT_TIMEOUT logger = logging.getLogger(__name__) @@ -52,10 +53,11 @@ @serializable() class 
SeaweedFSBlobDeposit(BlobDeposit): __canonical_name__ = "SeaweedFSBlobDeposit" - __version__ = SYFT_OBJECT_VERSION_3 + __version__ = SYFT_OBJECT_VERSION_4 urls: list[GridURL] size: int + proxy_node_uid: UID | None = None def write(self, data: BytesIO) -> SyftSuccess | SyftError: # relative @@ -90,9 +92,14 @@ def write(self, data: BytesIO) -> SyftSuccess | SyftError: start=1, ): if api is not None and api.connection is not None: - blob_url = api.connection.to_blob_route( - url.url_path, host=url.host_or_ip - ) + if self.proxy_node_uid is None: + blob_url = api.connection.to_blob_route( + url.url_path, host=url.host_or_ip + ) + else: + blob_url = api.connection.stream_via( + self.proxy_node_uid, url.url_path + ) else: blob_url = url diff --git a/packages/syft/src/syft/store/mongo_document_store.py b/packages/syft/src/syft/store/mongo_document_store.py index 9863cfcdeb2..0b7353eeac2 100644 --- a/packages/syft/src/syft/store/mongo_document_store.py +++ b/packages/syft/src/syft/store/mongo_document_store.py @@ -376,7 +376,7 @@ def _update( except Exception as e: return Err(f"Failed to update obj: {obj} with qk: {qk}. 
Error: {e}") - return Ok(obj) + return Ok(prev_obj) else: return Err(f"Failed to update obj {obj}, you have no permission") diff --git a/packages/syft/src/syft/types/grid_url.py b/packages/syft/src/syft/types/grid_url.py index 040969c2730..510a95d36c4 100644 --- a/packages/syft/src/syft/types/grid_url.py +++ b/packages/syft/src/syft/types/grid_url.py @@ -138,6 +138,10 @@ def base_url(self) -> str: def base_url_no_port(self) -> str: return f"{self.protocol}://{self.host_or_ip}" + @property + def url_no_protocol(self) -> str: + return f"{self.host_or_ip}:{self.port}{self.path}" + @property def url_path(self) -> str: return f"{self.path}{self.query_string}" diff --git a/packages/syft/src/syft/util/util.py b/packages/syft/src/syft/util/util.py index 22f003fefaf..d8098f55e1d 100644 --- a/packages/syft/src/syft/util/util.py +++ b/packages/syft/src/syft/util/util.py @@ -25,6 +25,7 @@ import platform import random import re +import secrets from secrets import randbelow import socket import sys @@ -918,6 +919,10 @@ def get_dev_mode() -> bool: return str_to_bool(os.getenv("DEV_MODE", "False")) +def generate_token() -> str: + return secrets.token_hex(64) + + def sanitize_html(html: str) -> str: policy = { "tags": ["svg", "strong", "rect", "path", "circle"], diff --git a/tests/integration/network/gateway_test.py b/tests/integration/network/gateway_test.py index 9c42a9e9687..48cddf8ce4c 100644 --- a/tests/integration/network/gateway_test.py +++ b/tests/integration/network/gateway_test.py @@ -112,6 +112,10 @@ def test_domain_connect_to_gateway( _remove_existing_peers(domain_client) _remove_existing_peers(gateway_client) + # Disable automatic acceptance of association requests + res = gateway_client.settings.allow_association_request_auto_approval(enable=False) + assert isinstance(res, SyftSuccess) + # connecting the domain to the gateway result = domain_client.connect_to_gateway(gateway_client) assert isinstance(result, Request) @@ -569,6 +573,7 @@ def 
test_add_route_on_peer(set_env_var, gateway_port: int, domain_1_port: int) - @pytest.mark.network +@pytest.mark.flaky(reruns=2, reruns_delay=2) def test_delete_route_on_peer( set_env_var, gateway_port: int, domain_1_port: int ) -> None: @@ -614,7 +619,7 @@ def test_delete_route_on_peer( # gateway delete the routes for the domain res = gateway_client.api.services.network.delete_route_on_peer( - peer=domain_peer, route_id=new_route.id + peer=domain_peer, route=new_route ) assert isinstance(res, SyftSuccess) gateway_peer = domain_client.peers[0] @@ -912,3 +917,61 @@ def test_peer_health_check(set_env_var, gateway_port: int, domain_1_port: int) - # Remove existing peers assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) + + +@pytest.mark.network +def test_reverse_tunnel_connection(domain_1_port: int, gateway_port: int): + # login to the domain and gateway + + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + domain_client: DomainClient = sy.login( + port=domain_1_port, email="info@openmined.org", password="changethis" + ) + + res = gateway_client.settings.allow_association_request_auto_approval(enable=False) + + # Try removing existing peers just to make sure + _remove_existing_peers(domain_client) + _remove_existing_peers(gateway_client) + + # connecting the domain to the gateway + result = domain_client.connect_to_gateway(gateway_client, reverse_tunnel=True) + + assert isinstance(result, Request) + assert isinstance(result.changes[0], AssociationRequestChange) + + assert len(domain_client.peers) == 1 + + # Domain's peer is a gateway and vice-versa + domain_peer = domain_client.peers[0] + assert domain_peer.node_type == NodeType.GATEWAY + assert domain_peer.node_routes[0].rtunnel_token is None + assert len(gateway_client.peers) == 0 + + gateway_client_root = gateway_client.login( + email="info@openmined.org", 
password="changethis" + ) + res = gateway_client_root.api.services.request.get_all()[-1].approve() + assert not isinstance(res, SyftError) + + time.sleep(90) + + gateway_peers = gateway_client.api.services.network.get_all_peers() + assert len(gateway_peers) == 1 + assert len(gateway_peers[0].node_routes) == 1 + assert gateway_peers[0].node_routes[0].rtunnel_token is not None + + proxy_domain_client = gateway_client.peers[0] + + assert isinstance(proxy_domain_client, DomainClient) + assert isinstance(domain_peer, NodePeer) + assert gateway_client.name == domain_peer.name + assert domain_client.name == proxy_domain_client.name + + assert not isinstance(proxy_domain_client.datasets.get_all(), SyftError) + + # Try removing existing peers just to make sure + _remove_existing_peers(gateway_client) + _remove_existing_peers(domain_client) diff --git a/tox.ini b/tox.ini index d15fd50c6da..7955837a2ae 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,7 @@ [tox] envlist = + dev.k8s.launch.domain + dev.k8s.launch.gateway dev.k8s.registry dev.k8s.start dev.k8s.deploy @@ -469,7 +471,7 @@ commands = # Creating test-domain-1 cluster on port 9082 bash -c '\ - export CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} CLUSTER_HTTP_PORT=9082 && \ + export CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} CLUSTER_HTTP_PORT=9082 DEVSPACE_PROFILE=domain-tunnel && \ tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' @@ -872,6 +874,23 @@ commands = bash -c 'devspace cleanup images --kube-context k3d-${CLUSTER_NAME} --no-warn --namespace syft --var CONTAINER_REGISTRY=k3d-registry.localhost:5800 || true' bash -c 'kubectl --context k3d-${CLUSTER_NAME} delete namespace syft --now=true || true' +[testenv:dev.k8s.render] +description = Dump devspace rendered chargs for debugging. 
Save in `packages/grid/out.render` +changedir = {toxinidir}/packages/grid +passenv = HOME, USER, DEVSPACE_PROFILE +setenv= + OUTPUT_DIR = {env:OUTPUT_DIR:./.devspace/rendered} +allowlist_externals = + bash +commands = + bash -c '\ + if [[ -n "${DEVSPACE_PROFILE}" ]]; then export DEVSPACE_PROFILE="-p ${DEVSPACE_PROFILE}"; fi && \ + rm -rf ${OUTPUT_DIR} && \ + mkdir -p ${OUTPUT_DIR} && \ + echo "profile: $DEVSPACE_PROFILE" && \ + devspace print ${DEVSPACE_PROFILE} > ${OUTPUT_DIR}/config.txt && \ + devspace deploy --render --skip-build --no-warn ${DEVSPACE_PROFILE} --namespace syft --var CONTAINER_REGISTRY=k3d-registry.localhost:5800 > ${OUTPUT_DIR}/chart.yaml' + [testenv:dev.k8s.launch.gateway] description = Launch a single gateway on K8s passenv = HOME, USER @@ -886,7 +905,7 @@ commands = tox -e dev.k8s.{posargs:deploy} [testenv:dev.k8s.launch.domain] -description = Launch a single domain on K8s +description = Launch a single domain on K8s passenv = HOME, USER setenv= CLUSTER_NAME = {env:CLUSTER_NAME:test-domain-1}