diff --git a/website/docs/installation/weave-gitops.mdx b/website/docs/installation/weave-gitops.mdx index 752a3ea394..bd771639e4 100644 --- a/website/docs/installation/weave-gitops.mdx +++ b/website/docs/installation/weave-gitops.mdx @@ -103,7 +103,7 @@ import TabItem from "@theme/TabItem"; ```bash -curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.10.2/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp +curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.12.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp sudo mv /tmp/gitops /usr/local/bin gitops version ``` diff --git a/website/docs/references/cli-reference/gitops.md b/website/docs/references/cli-reference/gitops.md index 63860a39cb..3742895f5e 100644 --- a/website/docs/references/cli-reference/gitops.md +++ b/website/docs/references/cli-reference/gitops.md @@ -44,4 +44,4 @@ Command line utility for managing Kubernetes applications via GitOps. * [gitops set](gitops_set.md) - Sets one or many Weave GitOps CLI configs or resources * [gitops version](gitops_version.md) - Display gitops version -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_beta.md b/website/docs/references/cli-reference/gitops_beta.md index 05efc0c739..d3f5e7569a 100644 --- a/website/docs/references/cli-reference/gitops_beta.md +++ b/website/docs/references/cli-reference/gitops_beta.md @@ -24,4 +24,4 @@ This component contains unstable or still-in-development functionality * [gitops](gitops.md) - Weave GitOps * [gitops beta run](gitops_beta_run.md) - Set up an interactive sync between your cluster and your local file system -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_beta_run.md 
b/website/docs/references/cli-reference/gitops_beta_run.md index f6484dcd3a..d908ce531d 100644 --- a/website/docs/references/cli-reference/gitops_beta_run.md +++ b/website/docs/references/cli-reference/gitops_beta_run.md @@ -31,6 +31,11 @@ gitops beta run ./clusters/default/dev --root-dir ./clusters/default git clone https://github.com/stefanprodan/podinfo cd podinfo gitops beta run ./deploy/overlays/dev --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo Helm chart. Please note that file Chart.yaml must exist in the directory. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./chart/podinfo --timeout 3m --port-forward namespace=flux-system,resource=svc/run-dev-helm-podinfo,port=9898:9898 ``` ### Options @@ -42,14 +47,15 @@ gitops beta run ./deploy/overlays/dev --timeout 3m --port-forward namespace=dev, --context string The name of the kubeconfig context to use --dashboard-hashed-password string GitOps Dashboard password in BCrypt hash format --dashboard-port string GitOps Dashboard port (default "9001") - --flux-version string The version of Flux to install. (default "0.36.0") + --flux-version string The version of Flux to install. (default "0.37.0") -h, --help help for run --no-bootstrap Disable bootstrapping at shutdown. --no-session Disable session management. If not specified, the session will be enabled by default. --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. - --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. (default "run-main-91b93932-dirty") + --session-name string Specify the name of the session. 
If not specified, the name of the current branch and the last commit id will be used. (default "run-main-9229d8f5-dirty") --session-namespace string Specify the namespace of the session. (default "default") + --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. --timeout duration The timeout for operations during GitOps Run. (default 5m0s) ``` diff --git a/website/docs/references/cli-reference/gitops_check.md b/website/docs/references/cli-reference/gitops_check.md index 20b41a5acf..a2768cd696 100644 --- a/website/docs/references/cli-reference/gitops_check.md +++ b/website/docs/references/cli-reference/gitops_check.md @@ -36,4 +36,4 @@ gitops check * [gitops](gitops.md) - Weave GitOps -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_completion.md b/website/docs/references/cli-reference/gitops_completion.md index 15fe36d0c1..c7e485eacf 100644 --- a/website/docs/references/cli-reference/gitops_completion.md +++ b/website/docs/references/cli-reference/gitops_completion.md @@ -33,4 +33,4 @@ See each sub-command's help for details on how to use the generated script. 
* [gitops completion powershell](gitops_completion_powershell.md) - Generate the autocompletion script for powershell * [gitops completion zsh](gitops_completion_zsh.md) - Generate the autocompletion script for zsh -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_completion_bash.md b/website/docs/references/cli-reference/gitops_completion_bash.md index c0c98a454f..1dc93cb59d 100644 --- a/website/docs/references/cli-reference/gitops_completion_bash.md +++ b/website/docs/references/cli-reference/gitops_completion_bash.md @@ -52,4 +52,4 @@ gitops completion bash * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_completion_fish.md b/website/docs/references/cli-reference/gitops_completion_fish.md index a39175422d..8a36fb553b 100644 --- a/website/docs/references/cli-reference/gitops_completion_fish.md +++ b/website/docs/references/cli-reference/gitops_completion_fish.md @@ -43,4 +43,4 @@ gitops completion fish [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_completion_powershell.md b/website/docs/references/cli-reference/gitops_completion_powershell.md index 67d39735c6..c29408c27f 100644 --- a/website/docs/references/cli-reference/gitops_completion_powershell.md +++ b/website/docs/references/cli-reference/gitops_completion_powershell.md @@ -40,4 +40,4 @@ gitops completion powershell [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by 
spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_completion_zsh.md b/website/docs/references/cli-reference/gitops_completion_zsh.md index 7361d6df8e..59d3e29492 100644 --- a/website/docs/references/cli-reference/gitops_completion_zsh.md +++ b/website/docs/references/cli-reference/gitops_completion_zsh.md @@ -54,4 +54,4 @@ gitops completion zsh [flags] * [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_create.md b/website/docs/references/cli-reference/gitops_create.md index 2850fc0280..2f94cf440d 100644 --- a/website/docs/references/cli-reference/gitops_create.md +++ b/website/docs/references/cli-reference/gitops_create.md @@ -37,4 +37,4 @@ gitops create dashboard ww-gitops \ * [gitops](gitops.md) - Weave GitOps * [gitops create dashboard](gitops_create_dashboard.md) - Create a HelmRepository and HelmRelease to deploy Weave GitOps -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_get.md b/website/docs/references/cli-reference/gitops_get.md index b135b7d4ba..78ffc77d80 100644 --- a/website/docs/references/cli-reference/gitops_get.md +++ b/website/docs/references/cli-reference/gitops_get.md @@ -37,4 +37,4 @@ echo -n $PASSWORD | gitops get bcrypt-hash * [gitops get bcrypt-hash](gitops_get_bcrypt-hash.md) - Generates a hashed secret * [gitops get config](gitops_get_config.md) - Prints out the CLI configuration for Weave GitOps -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md 
b/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md index 5a7280e9ff..ae3fdad238 100644 --- a/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md +++ b/website/docs/references/cli-reference/gitops_get_bcrypt-hash.md @@ -36,4 +36,4 @@ echo -n $PASSWORD | gitops get bcrypt-hash * [gitops get](gitops_get.md) - Display one or many Weave GitOps resources -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_remove.md b/website/docs/references/cli-reference/gitops_remove.md index aaa695fb32..7386b7fdde 100644 --- a/website/docs/references/cli-reference/gitops_remove.md +++ b/website/docs/references/cli-reference/gitops_remove.md @@ -24,4 +24,4 @@ Remove various components of Weave GitOps * [gitops](gitops.md) - Weave GitOps * [gitops remove run](gitops_remove_run.md) - Remove GitOps Run sessions -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_set.md b/website/docs/references/cli-reference/gitops_set.md index 0e467263fc..b2d2b68123 100644 --- a/website/docs/references/cli-reference/gitops_set.md +++ b/website/docs/references/cli-reference/gitops_set.md @@ -32,4 +32,4 @@ gitops set config analytics true * [gitops](gitops.md) - Weave GitOps * [gitops set config](gitops_set_config.md) - Set the CLI configuration for Weave GitOps -###### Auto generated by spf13/cobra on 24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/docs/references/cli-reference/gitops_version.md b/website/docs/references/cli-reference/gitops_version.md index a705b6f36a..d7b0328cfb 100644 --- a/website/docs/references/cli-reference/gitops_version.md +++ b/website/docs/references/cli-reference/gitops_version.md @@ -27,4 +27,4 @@ gitops version [flags] * [gitops](gitops.md) - Weave GitOps -###### Auto generated by spf13/cobra on 
24-Nov-2022 +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/_components/CurlCodeBlock.jsx b/website/versioned_docs/version-0.12.0/_components/CurlCodeBlock.jsx new file mode 100644 index 0000000000..b27993ae63 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/_components/CurlCodeBlock.jsx @@ -0,0 +1,24 @@ +import React from "react"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +export default function CurlCodeBlock({ localPath, hostedPath, content }) { + return ( + <> + + {() => ( + + curl -o {localPath} {window.location.protocol} + //{window.location.host} + {hostedPath} + + )} + + + + {content} + + + ); +} diff --git a/website/versioned_docs/version-0.12.0/_components/TierLabel.jsx b/website/versioned_docs/version-0.12.0/_components/TierLabel.jsx new file mode 100644 index 0000000000..66b0bae5cb --- /dev/null +++ b/website/versioned_docs/version-0.12.0/_components/TierLabel.jsx @@ -0,0 +1,32 @@ +import React from "react"; +import Link from "@docusaurus/Link"; +import useGlobalData from "@docusaurus/useGlobalData"; + +const containerStyle = { + fontSize: 16, + marginLeft: 4, + fontVariant: "all-small-caps", +}; + +// This determines the current version of the docs you're looking at +// E.g. /docs/next or /docs/0.2.5 +const getCurrentVersionPath = () => { + const { "docusaurus-plugin-content-docs": data } = useGlobalData(); + const currentVersion = data?.default?.versions?.find( + (v) => v.name === "current" + ); + // Fallback to /docs just in case. Not sure if this is async etc. 
+ return currentVersion?.path || "/docs"; +}; + +export default function TierLabel({ tiers }) { + return ( + + {tiers} + + ); +} diff --git a/website/versioned_docs/version-0.12.0/assets/example-enterprise-helm.yaml b/website/versioned_docs/version-0.12.0/assets/example-enterprise-helm.yaml new file mode 100644 index 0000000000..d436b836a6 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/assets/example-enterprise-helm.yaml @@ -0,0 +1,48 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weave-gitops-enterprise-charts + namespace: flux-system +spec: + interval: 60m + secretRef: + name: weave-gitops-enterprise-credentials + url: https://charts.dev.wkp.weave.works/releases/charts-v3 +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops-enterprise + namespace: flux-system +spec: + chart: + spec: + interval: 65m + chart: mccp + sourceRef: + kind: HelmRepository + name: weave-gitops-enterprise-charts + namespace: flux-system + version: 0.11.0 + install: + crds: CreateReplace + upgrade: + crds: CreateReplace + interval: 50m + values: + # -- Configure TLS settings if needed + # tls: + # -- Can be disabled if TLS is handled by a user-provided ingress controller + # enabled: true + # -- optionally specify a TLS secret + # secretName: null + config: + capi: + repositoryURL: https://github.com/$GITHUB_USER/fleet-infra + # -- Can be changed depending on your git repo structure + # repositoryPath: ./clusters/management/clusters + # repositoryClustersPath: ./cluster + git: + type: github + # -- Change if using on-prem github/gitlab + # hostname: https://github.com diff --git a/website/versioned_docs/version-0.12.0/assets/templates/.keep b/website/versioned_docs/version-0.12.0/assets/templates/.keep new file mode 100644 index 0000000000..dc92bc0885 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/assets/templates/.keep @@ -0,0 +1 @@ +"# keep" \ No newline at end of file diff 
--git a/website/versioned_docs/version-0.12.0/assets/templates/capd-template.yaml b/website/versioned_docs/version-0.12.0/assets/templates/capd-template.yaml new file mode 100644 index 0000000000..95c0d3bfa2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/assets/templates/capd-template.yaml @@ -0,0 +1,158 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: CAPITemplate +metadata: + name: cluster-template-development + namespace: default + labels: + weave.works/template-type: cluster +spec: + description: A simple CAPD template + params: + - name: CLUSTER_NAME + required: true + description: This is used for the cluster naming. + - name: NAMESPACE + description: Namespace to create the cluster in + - name: KUBERNETES_VERSION + description: Kubernetes version to use for the cluster + options: ["1.19.11", "1.21.1", "1.22.0", "1.23.3"] + - name: CONTROL_PLANE_MACHINE_COUNT + description: Number of control planes + options: ["1", "2", "3"] + - name: WORKER_MACHINE_COUNT + description: Number of worker machines + resourcetemplates: + - apiVersion: gitops.weave.works/v1alpha1 + kind: GitopsCluster + metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + labels: + weave.works/capi: bootstrap + spec: + capiClusterRef: + name: "${CLUSTER_NAME}" + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + labels: + cni: calico + spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.128.0.0/12 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + metadata: + name: "${CLUSTER_NAME}" + 
namespace: "${NAMESPACE}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" + replicas: "${CONTROL_PLANE_MACHINE_COUNT}" + version: "${KUBERNETES_VERSION}" + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + template: + spec: {} + - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: 
"${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + spec: + clusterName: "${CLUSTER_NAME}" + replicas: "${WORKER_MACHINE_COUNT}" + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" + version: "${KUBERNETES_VERSION}" diff --git a/website/versioned_docs/version-0.12.0/cluster-management/_category_.json b/website/versioned_docs/version-0.12.0/cluster-management/_category_.json new file mode 100644 index 0000000000..349cd01853 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Cluster Management", + "position": 6 +} diff --git a/website/versioned_docs/version-0.12.0/cluster-management/add-applications.mdx b/website/versioned_docs/version-0.12.0/cluster-management/add-applications.mdx new file mode 100644 index 0000000000..e2aecc68d4 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/add-applications.mdx @@ -0,0 +1,35 @@ +--- +title: Add Applications +sidebar_position: 7 +hide_title: true +--- + +import TierLabel from "../\_components/TierLabel"; + +# Add Applications + +It is always useful to be able to install software packages to a bootstrapped cluster. Weave GitOps Enterprise enables this by adding applications to a target cluster through the UI, by adding a kustomization or a helmrelease. Here is how we can do that: + +### Add an Application to a target cluster (bootstrapped) + +1. At the applications page you can now find an add applications button. + +![Profiles Selection](./img/add-application-btn.png) + +2. A form will show up so that you can select the target cluster that you want to add an application to.
+ +![Profiles Selection](./img/add-application-form.png) + +3. Then you will be able to select either source type of git repository or helm repository from the selected cluster. + +![Profiles Selection](./img/add-application-select-source.png) + +4. If you select git repository as source type, you will be able to add an application from a kustomization. + +![Profiles Selection](./img/add-application-kustomization.png) + +5. If you select helm repository as source type, you will be able to add an application from a helm release. And if you had selected the profiles Helm chart repository URL, you will be able to select a profile from the Profiles list that have been created at [Profiles](profiles.mdx). + +![Profiles Selection](./img/add-application-helm-release.png) + +6. Last but not least, you will now be able to create a PR to the target cluster, and you can see a new PR in your GitOps repository. diff --git a/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml new file mode 100644 index 0000000000..4293707dca --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs-configmap.yaml @@ -0,0 +1,2441 @@ +apiVersion: v1 +data: + calico.yaml: "---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap + is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: + v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha + is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n + \ calico_backend: \"vxlan\"\n # On Azure, the underlying network has an MTU of + 1400, even though the network interface will have an MTU of 1500.\n # We set + this value to 1350 for “physical network MTU size minus 50” since we use VXLAN, + which uses a 50-byte header.\n # If enabling Wireguard, this value should be + changed to
1340 (Wireguard uses a 60-byte header).\n # https://docs.projectcalico.org/networking/mtu#determine-mtu-size\n + \ veth_mtu: \"1350\"\n \n # The CNI network configuration to install on each + node. The special\n # values in this config will be automatically populated.\n + \ cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": + \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n + \ \"log_level\": \"info\",\n \"log_file_path\": \"/var/log/calico/cni/cni.log\",\n + \ \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n + \ \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": + \"calico-ipam\"\n },\n \"policy\": {\n \"type\": + \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": + \"__KUBECONFIG_FILEPATH__\"\n }\n },\n {\n \"type\": + \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": + true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": + {\"bandwidth\": true}\n }\n ]\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\n\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: bgpconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BGPConfiguration\n listKind: BGPConfigurationList\n plural: + bgpconfigurations\n singular: bgpconfiguration\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n description: + BGPConfiguration contains the configuration for any BGP routing.\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BGPConfigurationSpec contains the + values of the BGP configuration.\n properties:\n asNumber:\n + \ description: 'ASNumber is the default AS number used by a node. + [Default:\n 64512]'\n format: int32\n type: + integer\n communities:\n description: Communities + is a list of BGP community values and their\n arbitrary names + for tagging routes.\n items:\n description: + Community contains standard or large community value\n and + its name.\n properties:\n name:\n description: + Name given to community value.\n type: string\n value:\n + \ description: Value must be of format `aa:nn` or `aa:nn:mm`.\n + \ For standard community use `aa:nn` format, where `aa` + and\n `nn` are 16 bit number. For large community use + `aa:nn:mm`\n format, where `aa`, `nn` and `mm` are 32 + bit number. Where,\n `aa` is an AS Number, `nn` and `mm` + are per-AS identifier.\n pattern: ^(\\d+):(\\d+)$|^(\\d+):(\\d+):(\\d+)$\n + \ type: string\n type: object\n type: + array\n listenPort:\n description: ListenPort + is the port where BGP protocol should listen.\n Defaults to + 179\n maximum: 65535\n minimum: 1\n type: + integer\n logSeverityScreen:\n description: 'LogSeverityScreen + is the log severity above which logs\n are sent to the stdout. + [Default: INFO]'\n type: string\n nodeToNodeMeshEnabled:\n + \ description: 'NodeToNodeMeshEnabled sets whether full node to + node\n BGP mesh is enabled. 
[Default: true]'\n type: + boolean\n prefixAdvertisements:\n description: + PrefixAdvertisements contains per-prefix advertisement\n configuration.\n + \ items:\n description: PrefixAdvertisement + configures advertisement properties\n for the specified CIDR.\n + \ properties:\n cidr:\n description: + CIDR for which properties should be advertised.\n type: + string\n communities:\n description: + Communities can be list of either community names\n already + defined in `Specs.Communities` or community value\n of + format `aa:nn` or `aa:nn:mm`. For standard community use\n `aa:nn` + format, where `aa` and `nn` are 16 bit number. For\n large + community use `aa:nn:mm` format, where `aa`, `nn` and\n `mm` + are 32 bit number. Where,`aa` is an AS Number, `nn` and\n `mm` + are per-AS identifier.\n items:\n type: + string\n type: array\n type: object\n + \ type: array\n serviceClusterIPs:\n description: + ServiceClusterIPs are the CIDR blocks from which service\n cluster + IPs are allocated. If specified, Calico will advertise these\n blocks, + as well as any cluster IPs within them.\n items:\n description: + ServiceClusterIPBlock represents a single allowed ClusterIP\n CIDR + block.\n properties:\n cidr:\n type: + string\n type: object\n type: array\n serviceExternalIPs:\n + \ description: ServiceExternalIPs are the CIDR blocks for Kubernetes\n + \ Service External IPs. Kubernetes Service ExternalIPs will + only be\n advertised if they are within one of these blocks.\n + \ items:\n description: ServiceExternalIPBlock + represents a single allowed\n External IP CIDR block.\n properties:\n + \ cidr:\n type: string\n type: + object\n type: array\n serviceLoadBalancerIPs:\n + \ description: ServiceLoadBalancerIPs are the CIDR blocks for + Kubernetes\n Service LoadBalancer IPs. 
Kubernetes Service status.LoadBalancer.Ingress\n + \ IPs will only be advertised if they are within one of these + blocks.\n items:\n description: ServiceLoadBalancerIPBlock + represents a single allowed\n LoadBalancer IP CIDR block.\n + \ properties:\n cidr:\n type: + string\n type: object\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: bgppeers.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BGPPeer\n listKind: BGPPeerList\n plural: bgppeers\n + \ singular: bgppeer\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BGPPeerSpec contains the specification + for a BGPPeer resource.\n properties:\n asNumber:\n + \ description: The AS Number of the peer.\n format: + int32\n type: integer\n keepOriginalNextHop:\n + \ description: Option to keep the original nexthop field when + routes\n are sent to a BGP Peer. 
Setting \"true\" configures + the selected BGP\n Peers node to use the \"next hop keep;\" + instead of \"next hop self;\"(default)\n in the specific branch + of the Node on \"bird.cfg\".\n type: boolean\n maxRestartTime:\n + \ description: Time to allow for software restart. When specified, + this\n is configured as the graceful restart timeout. When + not specified,\n the BIRD default of 120s is used.\n type: + string\n node:\n description: The node name identifying + the Calico node instance that\n is targeted by this peer. If + this is not set, and no nodeSelector\n is specified, then this + BGP peer selects all nodes in the cluster.\n type: string\n nodeSelector:\n + \ description: Selector for the nodes that should have this peering. + \ When\n this is set, the Node field must be empty.\n type: + string\n password:\n description: Optional BGP + password for the peerings generated by this\n BGPPeer resource.\n + \ properties:\n secretKeyRef:\n description: + Selects a key of a secret in the node pod's namespace.\n properties:\n + \ key:\n description: The key of + the secret to select from. Must be\n a valid secret + key.\n type: string\n name:\n + \ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n + \ TODO: Add other useful fields. apiVersion, kind, uid?'\n + \ type: string\n optional:\n description: + Specify whether the Secret or its key must be\n defined\n + \ type: boolean\n required:\n - + key\n type: object\n type: object\n peerIP:\n + \ description: The IP address of the peer followed by an optional + port\n number to peer with. If port number is given, format + should be `[]:port`\n or `:` for IPv4. If + optional port number is not set,\n and this peer IP and ASNumber + belongs to a calico/node with ListenPort\n set in BGPConfiguration, + then we use that port to peer.\n type: string\n peerSelector:\n + \ description: Selector for the remote nodes to peer with. 
When + this\n is set, the PeerIP and ASNumber fields must be empty. + \ For each\n peering between the local node and selected remote + nodes, we configure\n an IPv4 peering if both ends have NodeBGPSpec.IPv4Address + specified,\n and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address + specified. The\n remote AS number comes from the remote node’s + NodeBGPSpec.ASNumber,\n or the global default if that is not + set.\n type: string\n sourceAddress:\n description: + Specifies whether and how to configure a source address\n for + the peerings generated by this BGPPeer resource. Default value\n \"UseNodeIP\" + means to configure the node IP as the source address. \"None\"\n means + not to configure a source address.\n type: string\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: blockaffinities.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: BlockAffinity\n listKind: BlockAffinityList\n plural: + blockaffinities\n singular: blockaffinity\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: BlockAffinitySpec contains the specification + for a BlockAffinity\n resource.\n properties:\n cidr:\n + \ type: string\n deleted:\n description: + Deleted indicates that this block affinity is being deleted.\n This + field is a string for compatibility with older releases that\n mistakenly + treat this field as a string.\n type: string\n node:\n + \ type: string\n state:\n type: + string\n required:\n - cidr\n - deleted\n + \ - node\n - state\n type: object\n + \ type: object\n served: true\n storage: true\nstatus:\n acceptedNames:\n + \ kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: clusterinformations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: ClusterInformation\n listKind: ClusterInformationList\n + \ plural: clusterinformations\n singular: clusterinformation\n scope: Cluster\n + \ versions:\n - name: v1\n schema:\n openAPIV3Schema:\n description: + ClusterInformation contains the cluster specific information.\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: ClusterInformationSpec contains + the values of describing\n the cluster.\n properties:\n + \ calicoVersion:\n description: CalicoVersion is + the version of Calico that the cluster\n is running\n type: + string\n clusterGUID:\n description: ClusterGUID + is the GUID of the cluster\n type: string\n clusterType:\n + \ description: ClusterType describes the type of the cluster\n + \ type: string\n datastoreReady:\n description: + DatastoreReady is used during significant datastore migrations\n to + signal to components such as Felix that it should wait before\n accessing + the datastore.\n type: boolean\n variant:\n description: + Variant declares which variant of Calico should be active.\n type: + string\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: felixconfigurations.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: FelixConfiguration\n listKind: + FelixConfigurationList\n plural: felixconfigurations\n singular: felixconfiguration\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ description: Felix Configuration contains the configuration for Felix.\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: FelixConfigurationSpec contains + the values of the Felix configuration.\n properties:\n allowIPIPPacketsFromWorkloads:\n + \ description: 'AllowIPIPPacketsFromWorkloads controls whether + Felix\n will add a rule to drop IPIP encapsulated traffic from + workloads\n [Default: false]'\n type: boolean\n + \ allowVXLANPacketsFromWorkloads:\n description: + 'AllowVXLANPacketsFromWorkloads controls whether Felix\n will + add a rule to drop VXLAN encapsulated traffic from workloads\n [Default: + false]'\n type: boolean\n awsSrcDstCheck:\n description: + 'Set source-destination-check on AWS EC2 instances. Accepted\n value + must be one of \"DoNothing\", \"Enabled\" or \"Disabled\". [Default:\n DoNothing]'\n + \ enum:\n - DoNothing\n - + Enable\n - Disable\n type: string\n bpfConnectTimeLoadBalancingEnabled:\n + \ description: 'BPFConnectTimeLoadBalancingEnabled when in BPF + mode,\n controls whether Felix installs the connection-time load + balancer. The\n connect-time load balancer is required for the + host to be able to\n reach Kubernetes services and it improves + the performance of pod-to-service\n connections. The only reason + to disable it is for debugging purposes. [Default:\n true]'\n + \ type: boolean\n bpfDataIfacePattern:\n description: + 'BPFDataIfacePattern is a regular expression that controls\n which + interfaces Felix should attach BPF programs to in order to\n catch + traffic to/from the network. 
This needs to match the interfaces\n that + Calico workload traffic flows over as well as any interfaces\n that + handle incoming traffic to nodeports and services from outside\n the + cluster. It should not match the workload interfaces (usually\n named + cali...). [Default: ^(en.*|eth.*|tunl0$)]'\n type: string\n bpfDisableUnprivileged:\n + \ description: 'BPFDisableUnprivileged, if enabled, Felix sets + the kernel.unprivileged_bpf_disabled\n sysctl to disable unprivileged + use of BPF. This ensures that unprivileged\n users cannot access + Calico''s BPF maps and cannot insert their own\n BPF programs + to interfere with Calico''s. [Default: true]'\n type: boolean\n + \ bpfEnabled:\n description: 'BPFEnabled, if enabled + Felix will use the BPF dataplane.\n [Default: false]'\n type: + boolean\n bpfExtToServiceConnmark:\n description: + 'BPFExtToServiceConnmark in BPF mode, control a 32bit\n mark + that is set on connections from an external client to a local\n service. + This mark allows us to control how packets of that connection\n are + routed within the host and how is routing intepreted by RPF\n check. + [Default: 0]'\n type: integer\n bpfExternalServiceMode:\n + \ description: 'BPFExternalServiceMode in BPF mode, controls how + connections\n from outside the cluster to services (node ports + and cluster IPs)\n are forwarded to remote workloads. If set + to \"Tunnel\" then both\n request and response traffic is tunneled + to the remote node. If\n set to \"DSR\", the request traffic + is tunneled but the response traffic\n is sent directly from + the remote node. In \"DSR\" mode, the remote\n node appears + to use the IP of the ingress node; this requires a\n permissive + L2 network. 
[Default: Tunnel]'\n type: string\n bpfKubeProxyEndpointSlicesEnabled:\n + \ description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, + controls\n whether Felix's embedded kube-proxy accepts EndpointSlices + or not.\n type: boolean\n bpfKubeProxyIptablesCleanupEnabled:\n + \ description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled + in BPF\n mode, Felix will proactively clean up the upstream Kubernetes + kube-proxy''s\n iptables chains. Should only be enabled if kube-proxy + is not running. [Default:\n true]'\n type: + boolean\n bpfKubeProxyMinSyncPeriod:\n description: + 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the\n minimum + time between updates to the dataplane for Felix''s embedded\n kube-proxy. + \ Lower values give reduced set-up latency. Higher values\n reduce + Felix CPU usage by batching up more work. [Default: 1s]'\n type: + string\n bpfLogLevel:\n description: 'BPFLogLevel + controls the log level of the BPF programs\n when in BPF dataplane + mode. One of \"Off\", \"Info\", or \"Debug\". The\n logs are + emitted to the BPF trace pipe, accessible with the command\n `tc + exec bpf debug`. [Default: Off].'\n type: string\n chainInsertMode:\n + \ description: 'ChainInsertMode controls whether Felix hooks the + kernel’s\n top-level iptables chains by inserting a rule at the + top of the\n chain or by appending a rule at the bottom. insert + is the safe default\n since it prevents Calico’s rules from being + bypassed. If you switch\n to append mode, be sure that the other + rules in the chains signal\n acceptance by falling through to + the Calico rules, otherwise the\n Calico policy will be bypassed. 
+ [Default: insert]'\n type: string\n dataplaneDriver:\n + \ type: string\n debugDisableLogDropping:\n type: + boolean\n debugMemoryProfilePath:\n type: string\n + \ debugSimulateCalcGraphHangAfter:\n type: string\n + \ debugSimulateDataplaneHangAfter:\n type: string\n + \ defaultEndpointToHostAction:\n description: 'DefaultEndpointToHostAction + controls what happens to\n traffic that goes from a workload + endpoint to the host itself (after\n the traffic hits the endpoint + egress policy). By default Calico\n blocks traffic from workload + endpoints to the host itself with an\n iptables “DROP” action. + If you want to allow some or all traffic\n from endpoint to host, + set this parameter to RETURN or ACCEPT. Use\n RETURN if you have + your own rules in the iptables “INPUT” chain;\n Calico will insert + its rules at the top of that chain, then “RETURN”\n packets to + the “INPUT” chain once it has completed processing workload\n endpoint + egress policy. Use ACCEPT to unconditionally accept packets\n from + workloads after processing workload endpoint egress policy.\n [Default: + Drop]'\n type: string\n deviceRouteProtocol:\n + \ description: This defines the route protocol added to programmed + device\n routes, by default this will be RTPROT_BOOT when left + blank.\n type: integer\n deviceRouteSourceAddress:\n + \ description: This is the source address to use on programmed + device\n routes. 
By default the source address is left blank, + leaving the\n kernel to choose the source address used.\n type: + string\n disableConntrackInvalidCheck:\n type: + boolean\n endpointReportingDelay:\n type: string\n + \ endpointReportingEnabled:\n type: boolean\n externalNodesList:\n + \ description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes\n + \ which may source tunnel traffic and have the tunneled traffic + be\n accepted at calico nodes.\n items:\n + \ type: string\n type: array\n failsafeInboundHostPorts:\n + \ description: 'FailsafeInboundHostPorts is a list of UDP/TCP + ports\n and CIDRs that Felix will allow incoming traffic to + host endpoints\n on irrespective of the security policy. This + is useful to avoid\n accidentally cutting off a host with incorrect + configuration. For\n back-compatibility, if the protocol is + not specified, it defaults\n to \"tcp\". If a CIDR is not specified, + it will allow traffic from\n all addresses. To disable all + inbound host ports, use the value\n none. The default value + allows ssh access and DHCP. [Default: tcp:22,\n udp:68, tcp:179, + tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'\n items:\n + \ description: ProtoPort is combination of protocol, port, and + CIDR.\n Protocol and port must be specified.\n properties:\n + \ net:\n type: string\n port:\n + \ type: integer\n protocol:\n type: + string\n required:\n - port\n - + protocol\n type: object\n type: array\n failsafeOutboundHostPorts:\n + \ description: 'FailsafeOutboundHostPorts is a list of UDP/TCP + ports\n and CIDRs that Felix will allow outgoing traffic from + host endpoints\n to irrespective of the security policy. This + is useful to avoid\n accidentally cutting off a host with incorrect + configuration. For\n back-compatibility, if the protocol is + not specified, it defaults\n to \"tcp\". If a CIDR is not specified, + it will allow traffic from\n all addresses. To disable all + outbound host ports, use the value\n none. 
The default value + opens etcd''s standard ports to ensure that\n Felix does not + get cut off from etcd as well as allowing DHCP and\n DNS. [Default: + tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,\n tcp:6667, + udp:53, udp:67]'\n items:\n description: ProtoPort + is combination of protocol, port, and CIDR.\n Protocol and + port must be specified.\n properties:\n net:\n + \ type: string\n port:\n type: + integer\n protocol:\n type: string\n + \ required:\n - port\n - + protocol\n type: object\n type: array\n featureDetectOverride:\n + \ description: FeatureDetectOverride is used to override the feature\n + \ detection. Values are specified in a comma separated list + with no\n spaces, example; \"SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=\".\n + \ \"true\" or \"false\" will force the feature, empty or omitted + values\n are auto-detected.\n type: string\n + \ genericXDPEnabled:\n description: 'GenericXDPEnabled + enables Generic XDP so network cards\n that don''t support XDP + offload or driver modes can use XDP. This\n is not recommended + since it doesn''t provide better performance\n than iptables. + [Default: false]'\n type: boolean\n healthEnabled:\n + \ type: boolean\n healthHost:\n type: + string\n healthPort:\n type: integer\n interfaceExclude:\n + \ description: 'InterfaceExclude is a comma-separated list of + interfaces\n that Felix should exclude when monitoring for host + endpoints. The\n default value ensures that Felix ignores Kubernetes'' + IPVS dummy\n interface, which is used internally by kube-proxy. + If you want to\n exclude multiple interface names using a single + value, the list\n supports regular expressions. For regular expressions + you must wrap\n the value with ''/''. For example having values + ''/^kube/,veth1''\n will exclude all interfaces that begin with + ''kube'' and also the\n interface ''veth1''. 
[Default: kube-ipvs0]'\n + \ type: string\n interfacePrefix:\n description: + 'InterfacePrefix is the interface name prefix that identifies\n workload + endpoints and so distinguishes them from host endpoint\n interfaces. + Note: in environments other than bare metal, the orchestrators\n configure + this appropriately. For example our Kubernetes and Docker\n integrations + set the ‘cali’ value, and our OpenStack integration\n sets the + ‘tap’ value. [Default: cali]'\n type: string\n interfaceRefreshInterval:\n + \ description: InterfaceRefreshInterval is the period at which + Felix\n rescans local interfaces to verify their state. The + rescan can be\n disabled by setting the interval to 0.\n type: + string\n ipipEnabled:\n type: boolean\n ipipMTU:\n + \ description: 'IPIPMTU is the MTU to set on the tunnel device. + See\n Configuring MTU [Default: 1440]'\n type: + integer\n ipsetsRefreshInterval:\n description: + 'IpsetsRefreshInterval is the period at which Felix re-checks\n all + iptables state to ensure that no other process has accidentally\n broken + Calico’s rules. Set to 0 to disable iptables refresh. [Default:\n 90s]'\n + \ type: string\n iptablesBackend:\n description: + IptablesBackend specifies which backend of iptables will\n be + used. The default is legacy.\n type: string\n iptablesFilterAllowAction:\n + \ type: string\n iptablesLockFilePath:\n description: + 'IptablesLockFilePath is the location of the iptables\n lock + file. You may need to change this if the lock file is not in\n its + standard location (for example if you have mapped it into Felix’s\n container + at a different path). [Default: /run/xtables.lock]'\n type: string\n + \ iptablesLockProbeInterval:\n description: 'IptablesLockProbeInterval + is the time that Felix will\n wait between attempts to acquire + the iptables lock if it is not\n available. Lower values make + Felix more responsive when the lock\n is contended, but use more + CPU. 
[Default: 50ms]'\n type: string\n iptablesLockTimeout:\n + \ description: 'IptablesLockTimeout is the time that Felix will + wait\n for the iptables lock, or 0, to disable. To use this feature, + Felix\n must share the iptables lock file with all other processes + that\n also take the lock. When running Felix inside a container, + this\n requires the /run directory of the host to be mounted + into the calico/node\n or calico/felix container. [Default: 0s + disabled]'\n type: string\n iptablesMangleAllowAction:\n + \ type: string\n iptablesMarkMask:\n description: + 'IptablesMarkMask is the mask that Felix selects its\n IPTables + Mark bits from. Should be a 32 bit hexadecimal number with\n at + least 8 bits set, none of which clash with any other mark bits\n in + use on the system. [Default: 0xff000000]'\n format: int32\n type: + integer\n iptablesNATOutgoingInterfaceFilter:\n type: + string\n iptablesPostWriteCheckInterval:\n description: + 'IptablesPostWriteCheckInterval is the period after Felix\n has + done a write to the dataplane that it schedules an extra read\n back + in order to check the write was not clobbered by another process.\n This + should only occur if another application on the system doesn’t\n respect + the iptables lock. [Default: 1s]'\n type: string\n iptablesRefreshInterval:\n + \ description: 'IptablesRefreshInterval is the period at which + Felix\n re-checks the IP sets in the dataplane to ensure that + no other process\n has accidentally broken Calico''s rules. + Set to 0 to disable IP\n sets refresh. Note: the default for + this value is lower than the\n other refresh intervals as a + workaround for a Linux kernel bug that\n was fixed in kernel + version 4.11. If you are using v4.11 or greater\n you may want + to set this to, a higher value to reduce Felix CPU\n usage. 
+ [Default: 10s]'\n type: string\n ipv6Support:\n + \ type: boolean\n kubeNodePortRanges:\n description: + 'KubeNodePortRanges holds list of port ranges used for\n service + node ports. Only used if felix detects kube-proxy running\n in + ipvs mode. Felix uses these ranges to separate host and workload\n traffic. + [Default: 30000:32767].'\n items:\n anyOf:\n + \ - type: integer\n - type: string\n + \ pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n logFilePath:\n description: + 'LogFilePath is the full path to the Felix log. Set to\n none + to disable file logging. [Default: /var/log/calico/felix.log]'\n type: + string\n logPrefix:\n description: 'LogPrefix + is the log prefix that Felix uses when rendering\n LOG rules. + [Default: calico-packet]'\n type: string\n logSeverityFile:\n + \ description: 'LogSeverityFile is the log severity above which + logs\n are sent to the log file. [Default: Info]'\n type: + string\n logSeverityScreen:\n description: 'LogSeverityScreen + is the log severity above which logs\n are sent to the stdout. + [Default: Info]'\n type: string\n logSeveritySys:\n + \ description: 'LogSeveritySys is the log severity above which + logs\n are sent to the syslog. Set to None for no logging to + syslog. [Default:\n Info]'\n type: string\n + \ maxIpsetSize:\n type: integer\n metadataAddr:\n + \ description: 'MetadataAddr is the IP address or domain name + of the\n server that can answer VM queries for cloud-init metadata. + In OpenStack,\n this corresponds to the machine running nova-api + (or in Ubuntu,\n nova-api-metadata). A value of none (case insensitive) + means that\n Felix should not set up any NAT rule for the metadata + path. [Default:\n 127.0.0.1]'\n type: string\n + \ metadataPort:\n description: 'MetadataPort is + the port of the metadata server. 
This,\n combined with global.MetadataAddr + (if not ‘None’), is used to set\n up a NAT rule, from 169.254.169.254:80 + to MetadataAddr:MetadataPort.\n In most cases this should not + need to be changed [Default: 8775].'\n type: integer\n mtuIfacePattern:\n + \ description: MTUIfacePattern is a regular expression that controls\n + \ which interfaces Felix should scan in order to calculate the + host's\n MTU. This should not match workload interfaces (usually + named cali...).\n type: string\n natOutgoingAddress:\n + \ description: NATOutgoingAddress specifies an address to use + when performing\n source NAT for traffic in a natOutgoing pool + that is leaving the\n network. By default the address used + is an address on the interface\n the traffic is leaving on + (ie it uses the iptables MASQUERADE target)\n type: string\n + \ natPortRange:\n anyOf:\n - + type: integer\n - type: string\n description: + NATPortRange specifies the range of ports that is used\n for + port mapping when doing outgoing NAT. When unset the default\n behavior + of the network stack is used.\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n netlinkTimeout:\n type: string\n openstackRegion:\n + \ description: 'OpenstackRegion is the name of the region that + a particular\n Felix belongs to. In a multi-region Calico/OpenStack + deployment,\n this must be configured somehow for each Felix + (here in the datamodel,\n or in felix.cfg or the environment + on each compute node), and must\n match the [calico] openstack_region + value configured in neutron.conf\n on each node. 
[Default: Empty]'\n + \ type: string\n policySyncPathPrefix:\n description: + 'PolicySyncPathPrefix is used to by Felix to communicate\n policy + changes to external services, like Application layer policy.\n [Default: + Empty]'\n type: string\n prometheusGoMetricsEnabled:\n + \ description: 'PrometheusGoMetricsEnabled disables Go runtime + metrics\n collection, which the Prometheus client does by default, + when set\n to false. This reduces the number of metrics reported, + reducing\n Prometheus load. [Default: true]'\n type: + boolean\n prometheusMetricsEnabled:\n description: + 'PrometheusMetricsEnabled enables the Prometheus metrics\n server + in Felix if set to true. [Default: false]'\n type: boolean\n + \ prometheusMetricsHost:\n description: 'PrometheusMetricsHost + is the host that the Prometheus\n metrics server should bind + to. [Default: empty]'\n type: string\n prometheusMetricsPort:\n + \ description: 'PrometheusMetricsPort is the TCP port that the + Prometheus\n metrics server should bind to. [Default: 9091]'\n + \ type: integer\n prometheusProcessMetricsEnabled:\n + \ description: 'PrometheusProcessMetricsEnabled disables process + metrics\n collection, which the Prometheus client does by default, + when set\n to false. This reduces the number of metrics reported, + reducing\n Prometheus load. [Default: true]'\n type: + boolean\n removeExternalRoutes:\n description: + Whether or not to remove device routes that have not\n been + programmed by Felix. Disabling this will allow external applications\n to + also add device routes. This is enabled by default which means\n we + will remove externally added routes.\n type: boolean\n reportingInterval:\n + \ description: 'ReportingInterval is the interval at which Felix + reports\n its status into the datastore or 0 to disable. Must + be non-zero\n in OpenStack deployments. 
[Default: 30s]'\n type: + string\n reportingTTL:\n description: 'ReportingTTL + is the time-to-live setting for process-wide\n status reports. + [Default: 90s]'\n type: string\n routeRefreshInterval:\n + \ description: 'RouterefreshInterval is the period at which Felix + re-checks\n the routes in the dataplane to ensure that no other + process has\n accidentally broken Calico’s rules. Set to 0 to + disable route refresh.\n [Default: 90s]'\n type: + string\n routeSource:\n description: 'RouteSource + configures where Felix gets its routing\n information. - WorkloadIPs: + use workload endpoints to construct\n routes. - CalicoIPAM: the + default - use IPAM data to construct routes.'\n type: string\n + \ routeTableRange:\n description: Calico programs + additional Linux route tables for various\n purposes. RouteTableRange + specifies the indices of the route tables\n that Calico should + use.\n properties:\n max:\n type: + integer\n min:\n type: integer\n required:\n + \ - max\n - min\n type: + object\n serviceLoopPrevention:\n description: + 'When service IP advertisement is enabled, prevent routing\n loops + to service IPs that are not in use, by dropping or rejecting\n packets + that do not get DNAT''d by kube-proxy. Unless set to \"Disabled\",\n in + which case such routing loops continue to be allowed. [Default:\n Drop]'\n + \ type: string\n sidecarAccelerationEnabled:\n + \ description: 'SidecarAccelerationEnabled enables experimental + sidecar\n acceleration [Default: false]'\n type: + boolean\n usageReportingEnabled:\n description: + 'UsageReportingEnabled reports anonymous Calico version\n number + and cluster size to projectcalico.org. Logs warnings returned\n by + the usage server. For example, if a significant security vulnerability\n has + been discovered in the version of Calico being used. 
[Default:\n true]'\n + \ type: boolean\n usageReportingInitialDelay:\n + \ description: 'UsageReportingInitialDelay controls the minimum + delay\n before Felix makes a report. [Default: 300s]'\n type: + string\n usageReportingInterval:\n description: + 'UsageReportingInterval controls the interval at which\n Felix + makes reports. [Default: 86400s]'\n type: string\n useInternalDataplaneDriver:\n + \ type: boolean\n vxlanEnabled:\n type: + boolean\n vxlanMTU:\n description: 'VXLANMTU is + the MTU to set on the tunnel device. See\n Configuring MTU [Default: + 1440]'\n type: integer\n vxlanPort:\n type: + integer\n vxlanVNI:\n type: integer\n wireguardEnabled:\n + \ description: 'WireguardEnabled controls whether Wireguard is + enabled.\n [Default: false]'\n type: boolean\n + \ wireguardInterfaceName:\n description: 'WireguardInterfaceName + specifies the name to use for\n the Wireguard interface. [Default: + wg.calico]'\n type: string\n wireguardListeningPort:\n + \ description: 'WireguardListeningPort controls the listening + port used\n by Wireguard. [Default: 51820]'\n type: + integer\n wireguardMTU:\n description: 'WireguardMTU + controls the MTU on the Wireguard interface.\n See Configuring + MTU [Default: 1420]'\n type: integer\n wireguardRoutingRulePriority:\n + \ description: 'WireguardRoutingRulePriority controls the priority + value\n to use for the Wireguard routing rule. [Default: 99]'\n + \ type: integer\n xdpEnabled:\n description: + 'XDPEnabled enables XDP acceleration for suitable untracked\n incoming + deny rules. [Default: true]'\n type: boolean\n xdpRefreshInterval:\n + \ description: 'XDPRefreshInterval is the period at which Felix + re-checks\n all XDP state to ensure that no other process has + accidentally broken\n Calico''s BPF maps or attached programs. + Set to 0 to disable XDP\n refresh. 
[Default: 90s]'\n type: + string\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: GlobalNetworkPolicy\n listKind: + GlobalNetworkPolicyList\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n properties:\n applyOnForward:\n + \ description: ApplyOnForward indicates to apply the rules in + this policy\n on forward traffic.\n type: + boolean\n doNotTrack:\n description: DoNotTrack + indicates whether packets matched by the rules\n in this policy + should go through the data plane's connection tracking,\n such + as Linux conntrack. 
If True, the rules in this policy are\n applied + before any data plane connection tracking, and packets allowed\n by + this policy are marked as not to be tracked.\n type: boolean\n + \ egress:\n description: The ordered set of egress + rules. Each rule contains\n a set of packet match criteria + and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. 
\\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. 
\\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. 
\\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. 
This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). 
+ \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. 
If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n ingress:\n description: The ordered set + of ingress rules. Each rule contains\n a set of packet match + criteria and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. 
\\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. 
\\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. 
\\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. 
This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). 
+ \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. 
If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n namespaceSelector:\n description: NamespaceSelector + is an optional field for an expression\n used to select a pod + based on namespaces.\n type: string\n order:\n + \ description: Order is an optional field that specifies the order + in\n which the policy is applied. Policies with higher \"order\" + are applied\n after those with lower order. If the order is + omitted, it may be\n considered to be \"infinite\" - i.e. the + policy will be applied last. Policies\n with identical order + will be applied in alphanumerical order based\n on the Policy + \"Name\".\n type: number\n preDNAT:\n description: + PreDNAT indicates to apply the rules in this policy before\n any + DNAT.\n type: boolean\n selector:\n description: + \"The selector is an expression used to pick pick out\n the endpoints + that the policy should be applied to. \\n Selector\n expressions + follow this syntax: \\n \\tlabel == \\\"string_literal\\\"\n \\ + -> comparison, e.g. my_label == \\\"foo bar\\\" \\tlabel != \\\"string_literal\\\"\n + \ \\ -> not equal; also matches if label is not present \\tlabel + in\n { \\\"a\\\", \\\"b\\\", \\\"c\\\", ... } -> true if the + value of label X is\n one of \\\"a\\\", \\\"b\\\", \\\"c\\\" + \\tlabel not in { \\\"a\\\", \\\"b\\\", \\\"c\\\",\n ... } -> + \ true if the value of label X is not one of \\\"a\\\", \\\"b\\\",\n \\\"c\\\" + \\thas(label_name) -> True if that label is present \\t! expr\n -> + negation of expr \\texpr && expr -> Short-circuit and \\texpr\n || + expr -> Short-circuit or \\t( expr ) -> parens for grouping \\tall()\n or + the empty selector -> matches all endpoints. \\n Label names are\n allowed + to contain alphanumerics, -, _ and /. String literals are\n more + permissive but they do not support escape characters. 
\\n Examples\n (with + made-up labels): \\n \\ttype == \\\"webserver\\\" && deployment\n == + \\\"prod\\\" \\ttype in {\\\"frontend\\\", \\\"backend\\\"} \\tdeployment !=\n + \ \\\"dev\\\" \\t! has(label_name)\"\n type: + string\n serviceAccountSelector:\n description: + ServiceAccountSelector is an optional field for an expression\n used + to select a pod based on service accounts.\n type: string\n types:\n + \ description: \"Types indicates whether this policy applies to + ingress,\n or to egress, or to both. When not explicitly specified + (and so\n the value on creation is empty or nil), Calico defaults + Types according\n to what Ingress and Egress rules are present + in the policy. The\n default is: \\n - [ PolicyTypeIngress ], + if there are no Egress rules\n (including the case where there + are also no Ingress rules) \\n\n - [ PolicyTypeEgress ], if + there are Egress rules but no Ingress\n rules \\n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are\n both Ingress and Egress rules. 
+ \\n When the policy is read back again,\n Types will always be + one of these values, never empty or nil.\"\n items:\n description: + PolicyType enumerates the possible values of the PolicySpec\n Types + field.\n type: string\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: globalnetworksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: GlobalNetworkSet\n listKind: GlobalNetworkSetList\n plural: + globalnetworksets\n singular: globalnetworkset\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n description: + GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs\n that + share labels to allow rules to refer to them via selectors. The labels\n of + GlobalNetworkSet are not namespaced.\n properties:\n apiVersion:\n + \ description: 'APIVersion defines the versioned schema of this representation\n + \ of an object. Servers should convert recognized schemas to the latest\n + \ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: GlobalNetworkSetSpec contains the + specification for a NetworkSet\n resource.\n properties:\n + \ nets:\n description: The list of IP networks + that belong to this set.\n items:\n type: + string\n type: array\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: hostendpoints.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: HostEndpoint\n listKind: HostEndpointList\n plural: + hostendpoints\n singular: hostendpoint\n scope: Cluster\n versions:\n - + name: v1\n schema:\n openAPIV3Schema:\n properties:\n apiVersion:\n + \ description: 'APIVersion defines the versioned schema of this representation\n + \ of an object. Servers should convert recognized schemas to the latest\n + \ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: HostEndpointSpec contains the specification + for a HostEndpoint\n resource.\n properties:\n expectedIPs:\n + \ description: \"The expected IP addresses (IPv4 and IPv6) of + the endpoint.\n If \\\"InterfaceName\\\" is not present, Calico + will look for an interface\n matching any of the IPs in the list + and apply policy to that. Note:\n \\tWhen using the selector + match criteria in an ingress or egress\n security Policy \\tor + Profile, Calico converts the selector into\n a set of IP addresses. + For host \\tendpoints, the ExpectedIPs field\n is used for that + purpose. (If only the interface \\tname is specified,\n Calico + does not learn the IPs of the interface for use in match\n \\tcriteria.)\"\n + \ items:\n type: string\n type: + array\n interfaceName:\n description: \"Either + \\\"*\\\", or the name of a specific Linux interface\n to apply + policy to; or empty. \\\"*\\\" indicates that this HostEndpoint\n governs + all traffic to, from or through the default network namespace\n of + the host named by the \\\"Node\\\" field; entering and leaving that\n namespace + via any interface, including those from/to non-host-networked\n local + workloads. \\n If InterfaceName is not \\\"*\\\", this HostEndpoint\n only + governs traffic that enters or leaves the host through the\n specific + interface named by InterfaceName, or - when InterfaceName\n is + empty - through the specific interface that has one of the IPs\n in + ExpectedIPs. Therefore, when InterfaceName is empty, at least\n one + expected IP must be specified. 
Only external interfaces (such\n as + “eth0”) are supported here; it isn't possible for a HostEndpoint\n to + protect traffic through a specific local workload interface.\n \\n + Note: Only some kinds of policy are implemented for \\\"*\\\" HostEndpoints;\n + \ initially just pre-DNAT policy. Please check Calico documentation\n + \ for the latest position.\"\n type: string\n + \ node:\n description: The node name identifying + the Calico node instance.\n type: string\n ports:\n + \ description: Ports contains the endpoint's named ports, which + may\n be referenced in security policy rules.\n items:\n + \ properties:\n name:\n type: + string\n port:\n type: integer\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n required:\n - name\n - + port\n - protocol\n type: object\n type: + array\n profiles:\n description: A list of identifiers + of security Profile objects that\n apply to this endpoint. + Each profile is applied in the order that\n they appear in + this list. Profile rules are applied after the selector-based\n security + policy.\n items:\n type: string\n type: + array\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: + CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuilder.io/version: + (devel)\n creationTimestamp: null\n name: ipamblocks.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: IPAMBlock\n listKind: IPAMBlockList\n + \ plural: ipamblocks\n singular: ipamblock\n scope: Cluster\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. 
Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMBlockSpec contains the specification + for an IPAMBlock\n resource.\n properties:\n affinity:\n + \ type: string\n allocations:\n items:\n + \ type: integer\n # TODO: This nullable is + manually added in. We should update controller-gen\n # to handle + []*int properly itself.\n nullable: true\n type: + array\n attributes:\n items:\n properties:\n + \ handle_id:\n type: string\n secondary:\n + \ additionalProperties:\n type: + string\n type: object\n type: object\n + \ type: array\n cidr:\n type: + string\n deleted:\n type: boolean\n strictAffinity:\n + \ type: boolean\n unallocated:\n items:\n + \ type: integer\n type: array\n required:\n + \ - allocations\n - attributes\n - + cidr\n - strictAffinity\n - unallocated\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: ipamconfigs.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPAMConfig\n listKind: IPAMConfigList\n plural: ipamconfigs\n + \ singular: ipamconfig\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n 
apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMConfigSpec contains the specification + for an IPAMConfig\n resource.\n properties:\n autoAllocateBlocks:\n + \ type: boolean\n maxBlocksPerHost:\n description: + MaxBlocksPerHost, if non-zero, is the max number of blocks\n that + can be affine to each host.\n type: integer\n strictAffinity:\n + \ type: boolean\n required:\n - autoAllocateBlocks\n + \ - strictAffinity\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: ipamhandles.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPAMHandle\n listKind: IPAMHandleList\n plural: ipamhandles\n + \ singular: ipamhandle\n scope: Cluster\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPAMHandleSpec contains the specification + for an IPAMHandle\n resource.\n properties:\n block:\n + \ additionalProperties:\n type: integer\n type: + object\n deleted:\n type: boolean\n handleID:\n + \ type: string\n required:\n - block\n + \ - handleID\n type: object\n type: object\n + \ served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: ippools.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: IPPool\n listKind: IPPoolList\n plural: ippools\n singular: + ippool\n scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: IPPoolSpec contains the specification + for an IPPool resource.\n properties:\n blockSize:\n + \ description: The block size to use for IP address assignments + from\n this pool. Defaults to 26 for IPv4 and 112 for IPv6.\n + \ type: integer\n cidr:\n description: + The pool CIDR.\n type: string\n disabled:\n description: + When disabled is true, Calico IPAM will not assign addresses\n from + this pool.\n type: boolean\n ipip:\n description: + 'Deprecated: this field is only used for APIv1 backwards\n compatibility. + Setting this field is not allowed, this field is\n for internal + use only.'\n properties:\n enabled:\n description: + When enabled is true, ipip tunneling will be used\n to + deliver packets to destinations within this pool.\n type: + boolean\n mode:\n description: The IPIP + mode. This can be one of \"always\" or \"cross-subnet\". A\n mode + of \"always\" will also use IPIP tunneling for routing to\n destination + IP addresses within this pool. A mode of \"cross-subnet\"\n will + only use IPIP tunneling when the destination node is on\n a + different subnet to the originating node. The default value\n (if + not specified) is \"always\".\n type: string\n type: + object\n ipipMode:\n description: Contains configuration + for IPIP tunneling for this pool.\n If not specified, then + this is defaulted to \"Never\" (i.e. IPIP tunneling\n is disabled).\n + \ type: string\n nat-outgoing:\n description: + 'Deprecated: this field is only used for APIv1 backwards\n compatibility. 
+ Setting this field is not allowed, this field is\n for internal + use only.'\n type: boolean\n natOutgoing:\n description: + When nat-outgoing is true, packets sent from Calico networked\n containers + in this pool to destinations outside of this pool will\n be + masqueraded.\n type: boolean\n nodeSelector:\n + \ description: Allows IPPool to allocate for a specific node by + label\n selector.\n type: string\n vxlanMode:\n + \ description: Contains configuration for VXLAN tunneling for + this pool.\n If not specified, then this is defaulted to \"Never\" + (i.e. VXLAN\n tunneling is disabled).\n type: + string\n required:\n - cidr\n type: object\n + \ type: object\n served: true\n storage: true\nstatus:\n acceptedNames:\n + \ kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: kubecontrollersconfigurations.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: KubeControllersConfiguration\n listKind: KubeControllersConfigurationList\n + \ plural: kubecontrollersconfigurations\n singular: kubecontrollersconfiguration\n + \ scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: KubeControllersConfigurationSpec + contains the values of the\n Kubernetes controllers configuration.\n + \ properties:\n controllers:\n description: + Controllers enables and configures individual Kubernetes\n controllers\n + \ properties:\n namespace:\n description: + Namespace enables and configures the namespace controller.\n Enabled + by default, set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to perform + reconciliation\n with the Calico datastore. [Default: + 5m]'\n type: string\n type: object\n + \ node:\n description: Node enables and + configures the node controller.\n Enabled by default, set + to nil to disable.\n properties:\n hostEndpoint:\n + \ description: HostEndpoint controls syncing nodes to + host endpoints.\n Disabled by default, set to nil to + disable.\n properties:\n autoCreate:\n + \ description: 'AutoCreate enables automatic creation + of\n host endpoints for every node. [Default: Disabled]'\n + \ type: string\n type: object\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ syncLabels:\n description: 'SyncLabels + controls whether to copy Kubernetes\n node labels to + Calico nodes. [Default: Enabled]'\n type: string\n type: + object\n policy:\n description: Policy + enables and configures the policy controller.\n Enabled + by default, set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to perform + reconciliation\n with the Calico datastore. [Default: + 5m]'\n type: string\n type: object\n + \ serviceAccount:\n description: ServiceAccount + enables and configures the service\n account controller. 
+ Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ type: object\n workloadEndpoint:\n description: + WorkloadEndpoint enables and configures the workload\n endpoint + controller. Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform reconciliation\n with + the Calico datastore. [Default: 5m]'\n type: string\n + \ type: object\n type: object\n etcdV3CompactionPeriod:\n + \ description: 'EtcdV3CompactionPeriod is the period between etcdv3\n + \ compaction requests. Set to 0 to disable. [Default: 10m]'\n + \ type: string\n healthChecks:\n description: + 'HealthChecks enables or disables support for health\n checks + [Default: Enabled]'\n type: string\n logSeverityScreen:\n + \ description: 'LogSeverityScreen is the log severity above which + logs\n are sent to the stdout. [Default: Info]'\n type: + string\n prometheusMetricsPort:\n description: + 'PrometheusMetricsPort is the TCP port that the Prometheus\n metrics + server should bind to. Set to 0 to disable. [Default: 9094]'\n type: + integer\n required:\n - controllers\n type: + object\n status:\n description: KubeControllersConfigurationStatus + represents the status\n of the configuration. 
It's useful for admins + to be able to see the actual\n config that was applied, which can + be modified by environment variables\n on the kube-controllers + process.\n properties:\n environmentVars:\n additionalProperties:\n + \ type: string\n description: EnvironmentVars + contains the environment variables on\n the kube-controllers + that influenced the RunningConfig.\n type: object\n runningConfig:\n + \ description: RunningConfig contains the effective config that + is running\n in the kube-controllers pod, after merging the + API resource with\n any environment variables.\n properties:\n + \ controllers:\n description: Controllers + enables and configures individual Kubernetes\n controllers\n + \ properties:\n namespace:\n description: + Namespace enables and configures the namespace\n controller. + Enabled by default, set to nil to disable.\n properties:\n + \ reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform\n reconciliation + with the Calico datastore. [Default:\n 5m]'\n type: + string\n type: object\n node:\n + \ description: Node enables and configures the node controller.\n + \ Enabled by default, set to nil to disable.\n properties:\n + \ hostEndpoint:\n description: + HostEndpoint controls syncing nodes to host\n endpoints. + Disabled by default, set to nil to disable.\n properties:\n + \ autoCreate:\n description: + 'AutoCreate enables automatic creation\n of host + endpoints for every node. [Default: Disabled]'\n type: + string\n type: object\n leakGracePeriod:\n + \ description: 'LeakGracePeriod is the period used + by the\n controller to determine if an IP address + has been leaked.\n Set to 0 to disable IP garbage + collection. [Default:\n 15m]'\n type: + string\n reconcilerPeriod:\n description: + 'ReconcilerPeriod is the period to perform\n reconciliation + with the Calico datastore. 
[Default:\n 5m]'\n type: + string\n syncLabels:\n description: + 'SyncLabels controls whether to copy Kubernetes\n node + labels to Calico nodes. [Default: Enabled]'\n type: + string\n type: object\n policy:\n + \ description: Policy enables and configures the policy + controller.\n Enabled by default, set to nil to disable.\n + \ properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n serviceAccount:\n + \ description: ServiceAccount enables and configures the + service\n account controller. Enabled by default, set + to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n workloadEndpoint:\n + \ description: WorkloadEndpoint enables and configures + the workload\n endpoint controller. Enabled by default, + set to nil to disable.\n properties:\n reconcilerPeriod:\n + \ description: 'ReconcilerPeriod is the period to + perform\n reconciliation with the Calico datastore. + [Default:\n 5m]'\n type: + string\n type: object\n type: object\n + \ etcdV3CompactionPeriod:\n description: + 'EtcdV3CompactionPeriod is the period between etcdv3\n compaction + requests. Set to 0 to disable. [Default: 10m]'\n type: string\n + \ healthChecks:\n description: 'HealthChecks + enables or disables support for health\n checks [Default: + Enabled]'\n type: string\n logSeverityScreen:\n + \ description: 'LogSeverityScreen is the log severity above + which\n logs are sent to the stdout. [Default: Info]'\n type: + string\n prometheusMetricsPort:\n description: + 'PrometheusMetricsPort is the TCP port that the Prometheus\n metrics + server should bind to. Set to 0 to disable. 
[Default:\n 9094]'\n + \ type: integer\n required:\n - + controllers\n type: object\n type: object\n type: + object\n served: true\n storage: true\nstatus:\n acceptedNames:\n kind: + \"\"\n plural: \"\"\n conditions: []\n storedVersions: []\n\n---\n\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n + \ controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: null\n + \ name: networkpolicies.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: NetworkPolicy\n listKind: NetworkPolicyList\n plural: + networkpolicies\n singular: networkpolicy\n scope: Namespaced\n versions:\n + \ - name: v1\n schema:\n openAPIV3Schema:\n properties:\n + \ apiVersion:\n description: 'APIVersion defines the versioned + schema of this representation\n of an object. Servers should convert + recognized schemas to the latest\n internal value, and may reject + unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n properties:\n egress:\n description: + The ordered set of egress rules. Each rule contains\n a set + of packet match criteria and a corresponding action to apply.\n items:\n + \ description: \"A Rule encapsulates a set of match criteria + and an\n action. Both selector-based security Policy and security + Profiles\n reference rules - separated out as a list of rules + for both ingress\n and egress packet matching. \\n Each positive + match criteria has\n a negated version, prefixed with ”Not”. 
+ All the match criteria\n within a rule must be satisfied for + a packet to match. A single\n rule can contain the positive + and negative version of a match\n and both must be satisfied + for the rule to match.\"\n properties:\n action:\n + \ type: string\n destination:\n description: + Destination contains the match criteria that apply\n to + destination entity.\n properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. 
If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. 
pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. 
Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. 
\\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n 
account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n ingress:\n description: The ordered set + of ingress rules. Each rule contains\n a set of packet match + criteria and a corresponding action to apply.\n items:\n description: + \"A Rule encapsulates a set of match criteria and an\n action. + \ Both selector-based security Policy and security Profiles\n reference + rules - separated out as a list of rules for both ingress\n and + egress packet matching. \\n Each positive match criteria has\n a + negated version, prefixed with ”Not”. All the match criteria\n within + a rule must be satisfied for a packet to match. 
A single\n rule + can contain the positive and negative version of a match\n and + both must be satisfied for the rule to match.\"\n properties:\n + \ action:\n type: string\n destination:\n + \ description: Destination contains the match criteria that + apply\n to destination entity.\n properties:\n + \ namespaceSelector:\n description: + \"NamespaceSelector is an optional field that\n contains + a selector expression. Only traffic that originates\n from + (or terminates at) endpoints within the selected\n namespaces + will be matched. When both NamespaceSelector\n and + Selector are defined on the same rule, then only workload\n endpoints + that are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. 
+ Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. \\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. 
\\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. 
If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n http:\n description: + HTTP contains match criteria that apply to HTTP\n requests.\n + \ properties:\n methods:\n description: + Methods is an optional field that restricts\n the + rule to apply only to HTTP requests that use one of\n the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple\n methods + are OR'd together.\n items:\n type: + string\n type: array\n paths:\n + \ description: 'Paths is an optional field that restricts\n + \ the rule to apply to HTTP requests that use one of + the\n listed HTTP Paths. Multiple paths are OR''d together.\n + \ e.g: - exact: /foo - prefix: /bar NOTE: Each entry + may\n ONLY specify either a `exact` or a `prefix` match. + The\n validator will check for it.'\n items:\n + \ description: 'HTTPPath specifies an HTTP path to + match.\n It may be either of the form: exact: : + which matches\n the path exactly or prefix: : + which matches\n the path prefix'\n properties:\n + \ exact:\n type: + string\n prefix:\n type: + string\n type: object\n type: + array\n type: object\n icmp:\n description: + ICMP is an optional field that restricts the rule\n to + apply to a specific type and code of ICMP traffic. This\n should + only be specified if the Protocol field is set to \"ICMP\"\n or + \"ICMPv6\".\n properties:\n code:\n + \ description: Match on a specific ICMP code. If specified,\n + \ the Type value must also be specified. This is a + technical\n limitation imposed by the kernel’s iptables + firewall,\n which Calico uses to enforce the rule.\n + \ type: integer\n type:\n description: + Match on a specific ICMP type. For example\n a value + of 8 refers to ICMP Echo Request (i.e. 
pings).\n type: + integer\n type: object\n ipVersion:\n + \ description: IPVersion is an optional field that restricts + the\n rule to only match a specific IP version.\n type: + integer\n metadata:\n description: + Metadata contains additional information for this\n rule\n + \ properties:\n annotations:\n + \ additionalProperties:\n type: + string\n description: Annotations is a set of key value + pairs that\n give extra information about the rule\n + \ type: object\n type: object\n + \ notICMP:\n description: NotICMP is + the negated version of the ICMP field.\n properties:\n + \ code:\n description: Match + on a specific ICMP code. If specified,\n the Type + value must also be specified. This is a technical\n limitation + imposed by the kernel’s iptables firewall,\n which + Calico uses to enforce the rule.\n type: integer\n + \ type:\n description: Match + on a specific ICMP type. For example\n a value of + 8 refers to ICMP Echo Request (i.e. pings).\n type: + integer\n type: object\n notProtocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: NotProtocol is the negated + version of the Protocol\n field.\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n protocol:\n + \ anyOf:\n - type: integer\n - + type: string\n description: \"Protocol is an optional field + that restricts the\n rule to only apply to traffic of a + specific IP protocol. Required\n if any of the EntityRules + contain Ports (because ports only\n apply to certain protocols). + \\n Must be one of these string\n values: \\\"TCP\\\", + \\\"UDP\\\", \\\"ICMP\\\", \\\"ICMPv6\\\", \\\"SCTP\\\",\n \\\"UDPLite\\\" + or an integer in the range 1-255.\"\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n source:\n description: Source + contains the match criteria that apply to\n source entity.\n + \ properties:\n namespaceSelector:\n + \ description: \"NamespaceSelector is an optional field + that\n contains a selector expression. 
Only traffic + that originates\n from (or terminates at) endpoints + within the selected\n namespaces will be matched. When + both NamespaceSelector\n and Selector are defined on + the same rule, then only workload\n endpoints that + are matched by both selectors will be selected\n by + the rule. \\n For NetworkPolicy, an empty NamespaceSelector\n implies + that the Selector is limited to selecting only\n workload + endpoints in the same namespace as the NetworkPolicy.\n \\n + For NetworkPolicy, `global()` NamespaceSelector implies\n that + the Selector is limited to selecting only GlobalNetworkSet\n or + HostEndpoint. \\n For GlobalNetworkPolicy, an empty\n NamespaceSelector + implies the Selector applies to workload\n endpoints + across all namespaces.\"\n type: string\n nets:\n + \ description: Nets is an optional field that restricts + the\n rule to only apply to traffic that originates + from (or\n terminates at) IP addresses in any of + the given subnets.\n items:\n type: + string\n type: array\n notNets:\n + \ description: NotNets is the negated version of the + Nets\n field.\n items:\n + \ type: string\n type: + array\n notPorts:\n description: + NotPorts is the negated version of the Ports\n field. + Since only some protocols have ports, if any ports\n are + specified it requires the Protocol match in the Rule\n to + be set to \"TCP\" or \"UDP\".\n items:\n anyOf:\n + \ - type: integer\n - + type: string\n pattern: ^.*\n x-kubernetes-int-or-string: + true\n type: array\n notSelector:\n + \ description: NotSelector is the negated version of + the Selector\n field. See Selector field for subtleties + with negated\n selectors.\n type: + string\n ports:\n description: + \"Ports is an optional field that restricts\n the rule + to only apply to traffic that has a source (destination)\n port + that matches one of these ranges/values. This value\n is + a list of integers or strings that represent ranges\n of + ports. 
\\n Since only some protocols have ports, if\n any + ports are specified it requires the Protocol match\n in + the Rule to be set to \\\"TCP\\\" or \\\"UDP\\\".\"\n items:\n + \ anyOf:\n - type: + integer\n - type: string\n pattern: + ^.*\n x-kubernetes-int-or-string: true\n type: + array\n selector:\n description: + \"Selector is an optional field that contains\n a selector + expression (see Policy for sample syntax).\n \\ Only + traffic that originates from (terminates at) endpoints\n matching + the selector will be matched. \\n Note that: in\n addition + to the negated version of the Selector (see NotSelector\n below), + the selector expression syntax itself supports\n negation. + \ The two types of negation are subtly different.\n One + negates the set of matched endpoints, the other negates\n the + whole match: \\n \\tSelector = \\\"!has(my_label)\\\" matches\n packets + that are from other Calico-controlled \\tendpoints\n that + do not have the label “my_label”. \\n \\tNotSelector\n = + \\\"has(my_label)\\\" matches packets that are not from\n Calico-controlled + \\tendpoints that do have the label “my_label”.\n \\n + The effect is that the latter will accept packets from\n non-Calico + sources whereas the former is limited to packets\n from + Calico-controlled endpoints.\"\n type: string\n serviceAccounts:\n + \ description: ServiceAccounts is an optional field + that restricts\n the rule to only apply to traffic + that originates from\n (or terminates at) a pod running + as a matching service\n account.\n properties:\n + \ names:\n description: + Names is an optional field that restricts\n the + rule to only apply to traffic that originates\n from + (or terminates at) a pod running as a service\n account + whose name is in the list.\n items:\n type: + string\n type: array\n selector:\n + \ description: Selector is an optional field that + restricts\n the rule to only apply to traffic + that originates\n from (or terminates at) a pod + running as a service\n 
account that matches the + given label selector. If\n both Names and Selector + are specified then they are\n AND'ed.\n type: + string\n type: object\n services:\n + \ description: \"Services is an optional field that + contains\n options for matching Kubernetes Services. + If specified,\n only traffic that originates from + or terminates at endpoints\n within the selected + service(s) will be matched, and only\n to/from each + endpoint's port. \\n Services cannot be specified\n on + the same rule as Selector, NotSelector, NamespaceSelector,\n Ports, + NotPorts, Nets, NotNets or ServiceAccounts. \\n\n Only + valid on egress rules.\"\n properties:\n name:\n + \ description: Name specifies the name of a Kubernetes\n + \ Service to match.\n type: + string\n namespace:\n description: + Namespace specifies the namespace of the\n given + Service. If left empty, the rule will match\n within + this policy's namespace.\n type: string\n type: + object\n type: object\n required:\n + \ - action\n type: object\n type: + array\n order:\n description: Order is an optional + field that specifies the order in\n which the policy is applied. + Policies with higher \"order\" are applied\n after those with + lower order. If the order is omitted, it may be\n considered + to be \"infinite\" - i.e. the policy will be applied last. Policies\n with + identical order will be applied in alphanumerical order based\n on + the Policy \"Name\".\n type: number\n selector:\n + \ description: \"The selector is an expression used to pick pick + out\n the endpoints that the policy should be applied to. \\n + Selector\n expressions follow this syntax: \\n \\tlabel == \\\"string_literal\\\"\n + \ \\ -> comparison, e.g. my_label == \\\"foo bar\\\" \\tlabel + != \\\"string_literal\\\"\n \\ -> not equal; also matches if + label is not present \\tlabel in\n { \\\"a\\\", \\\"b\\\", \\\"c\\\", + ... 
} -> true if the value of label X is\n one of \\\"a\\\", + \\\"b\\\", \\\"c\\\" \\tlabel not in { \\\"a\\\", \\\"b\\\", \\\"c\\\",\n ... + } -> true if the value of label X is not one of \\\"a\\\", \\\"b\\\",\n \\\"c\\\" + \\thas(label_name) -> True if that label is present \\t! expr\n -> + negation of expr \\texpr && expr -> Short-circuit and \\texpr\n || + expr -> Short-circuit or \\t( expr ) -> parens for grouping \\tall()\n or + the empty selector -> matches all endpoints. \\n Label names are\n allowed + to contain alphanumerics, -, _ and /. String literals are\n more + permissive but they do not support escape characters. \\n Examples\n (with + made-up labels): \\n \\ttype == \\\"webserver\\\" && deployment\n == + \\\"prod\\\" \\ttype in {\\\"frontend\\\", \\\"backend\\\"} \\tdeployment !=\n + \ \\\"dev\\\" \\t! has(label_name)\"\n type: + string\n serviceAccountSelector:\n description: + ServiceAccountSelector is an optional field for an expression\n used + to select a pod based on service accounts.\n type: string\n types:\n + \ description: \"Types indicates whether this policy applies to + ingress,\n or to egress, or to both. When not explicitly specified + (and so\n the value on creation is empty or nil), Calico defaults + Types according\n to what Ingress and Egress are present in the + policy. 
The default\n is: \\n - [ PolicyTypeIngress ], if there + are no Egress rules (including\n the case where there are also + no Ingress rules) \\n - [ PolicyTypeEgress\n ], if there are + Egress rules but no Ingress rules \\n - [ PolicyTypeIngress,\n PolicyTypeEgress + ], if there are both Ingress and Egress rules.\n \\n When the + policy is read back again, Types will always be one\n of these + values, never empty or nil.\"\n items:\n description: + PolicyType enumerates the possible values of the PolicySpec\n Types + field.\n type: string\n type: array\n type: + object\n type: object\n served: true\n storage: true\nstatus:\n + \ acceptedNames:\n kind: \"\"\n plural: \"\"\n conditions: []\n storedVersions: + []\n\n---\n\n---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n + \ annotations:\n controller-gen.kubebuilder.io/version: (devel)\n creationTimestamp: + null\n name: networksets.crd.projectcalico.org\nspec:\n group: crd.projectcalico.org\n + \ names:\n kind: NetworkSet\n listKind: NetworkSetList\n plural: networksets\n + \ singular: networkset\n scope: Namespaced\n versions:\n - name: v1\n schema:\n + \ openAPIV3Schema:\n description: NetworkSet is the Namespaced-equivalent + of the GlobalNetworkSet.\n properties:\n apiVersion:\n description: + 'APIVersion defines the versioned schema of this representation\n of + an object. Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind + is a string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n + \ spec:\n description: NetworkSetSpec contains the specification + for a NetworkSet\n resource.\n properties:\n nets:\n + \ description: The list of IP networks that belong to this set.\n + \ items:\n type: string\n type: + array\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n\n---\n---\n# Source: calico/templates/calico-kube-controllers-rbac.yaml\n\n# + Include a clusterrole for the kube-controllers component,\n# and bind it to the + calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n + \ name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for + deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n + \ - watch\n - list\n - get\n # Pods are watched to check for existence + as part of IPAM controller.\n - apiGroups: [\"\"]\n resources:\n - pods\n + \ verbs:\n - get\n - list\n - watch\n # IPAM resources are manipulated + when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n + \ verbs:\n - get\n - list\n - create\n - update\n - + delete\n - watch\n # kube-controllers manages hostendpoints.\n - apiGroups: + [\"crd.projectcalico.org\"]\n resources:\n - hostendpoints\n verbs:\n + \ - get\n - list\n - create\n - update\n - delete\n # + Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - clusterinformations\n verbs:\n - get\n - + create\n - update\n # KubeControllersConfiguration is where it gets its + config\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - 
kubecontrollersconfigurations\n + \ verbs:\n # read its own config\n - get\n # create a default + if none exists\n - create\n # update status\n - update\n # + watch for changes\n - watch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n + \ name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n + \ kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n - kind: ServiceAccount\n + \ name: calico-kube-controllers\n namespace: kube-system\n---\n\n---\n# Source: + calico/templates/calico-node-rbac.yaml\n# Include a clusterrole for the calico-node + DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: + rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The + CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n + \ resources:\n - pods\n - nodes\n - namespaces\n verbs:\n + \ - get\n # EndpointSlices are used for Service-based network policy rule\n + \ # enforcement.\n - apiGroups: [\"discovery.k8s.io\"]\n resources:\n - + endpointslices\n verbs:\n - watch\n - list\n - apiGroups: [\"\"]\n + \ resources:\n - endpoints\n - services\n verbs:\n # Used + to discover service IPs for advertisement.\n - watch\n - list\n # + Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm + needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n + \ verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n + \ verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - + patch\n # Calico stores some configuration information in node annotations.\n + \ - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: + [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n + \ - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: + [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n + \ verbs:\n - list\n - watch\n # The CNI 
plugin patches pods/status.\n + \ - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - + patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - globalfelixconfigs\n - felixconfigurations\n - + bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n + \ - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n + \ - networkpolicies\n - networksets\n - clusterinformations\n - + hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n + \ - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: + [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n + \ - clusterinformations\n verbs:\n - create\n - update\n # Calico + stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n + \ - nodes\n verbs:\n - get\n - list\n - watch\n # These + permissions are only required for upgrade from v2.6, and can\n # be removed after + upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n + \ resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - + create\n - update\n # These permissions are required for Calico CNI to perform + IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n + \ - get\n - list\n - create\n - update\n - delete\n - + apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n + \ verbs:\n - get\n # Block affinities must also be watchable by confd + for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n + \ - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration + needs to get daemonsets. 
These permissions can be\n # removed if not upgrading + from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n + \ - daemonsets\n verbs:\n - get\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: + ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n + \ kind: ClusterRole\n name: calico-node\nsubjects:\n - kind: ServiceAccount\n + \ name: calico-node\n namespace: kube-system\n\n---\n# Source: calico/templates/calico-node.yaml\n# + This manifest installs the calico-node container, as well\n# as the CNI plugins + and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: + DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n + \ labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: + calico-node\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n + \ maxUnavailable: 1\n template:\n metadata:\n labels:\n k8s-app: + calico-node\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n + \ hostNetwork: true\n tolerations:\n # Make sure calico-node gets + scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n + \ # Mark the pod as a critical add-on for rescheduling.\n - key: + CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n + \ operator: Exists\n serviceAccountName: calico-node\n # Minimize + downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n + \ # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n + \ terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n + \ initContainers:\n # This container performs upgrade from host-local + IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, + or if you have already\n # upgraded to use calico-ipam.\n - name: + upgrade-ipam\n image: calico/cni:v3.20.0\n command: [\"/opt/cni/bin/calico-ipam\", + \"-upgrade\"]\n envFrom:\n - 
configMapRef:\n # + Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for + eBPF mode.\n name: kubernetes-services-endpoint\n optional: + true\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n + \ fieldRef:\n fieldPath: spec.nodeName\n - + name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: calico_backend\n + \ volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: + host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n securityContext:\n privileged: true\n # + This container installs the CNI binaries\n # and CNI network config file + on each node.\n - name: install-cni\n image: calico/cni:v3.20.0\n + \ command: [\"/opt/cni/bin/install\"]\n envFrom:\n - + configMapRef:\n # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT + to be overridden for eBPF mode.\n name: kubernetes-services-endpoint\n + \ optional: true\n env:\n # Name of the CNI + config file to create.\n - name: CNI_CONF_NAME\n value: + \"10-calico.conflist\"\n # The CNI network config to install on each + node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: cni_network_config\n + \ # Set the hostname based on the k8s node name.\n - name: + KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: veth_mtu\n # Prevents the container + from sleeping forever.\n - name: SLEEP\n value: \"false\"\n + \ volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: + cni-net-dir\n securityContext:\n privileged: true\n # + Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n + \ # to communicate with Felix over the Policy Sync API.\n - name: + flexvol-driver\n image: calico/pod2daemon-flexvol:v3.20.0\n volumeMounts:\n + \ - name: 
flexvol-driver-host\n mountPath: /host/driver\n + \ securityContext:\n privileged: true\n containers:\n + \ # Runs calico-node container on each Kubernetes node. This\n # + container programs network policy and routes on each\n # host.\n - + name: calico-node\n image: calico/node:v3.20.0\n envFrom:\n + \ - configMapRef:\n # Allow KUBERNETES_SERVICE_HOST and + KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.\n name: + kubernetes-services-endpoint\n optional: true\n env:\n + \ # Use Kubernetes API as the backing datastore.\n - name: + DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the + datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n + \ # Set based on the k8s node name.\n - name: NODENAME\n + \ valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: calico_backend\n # Cluster type + to identify the deployment type\n - name: CLUSTER_TYPE\n value: + \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: + IP\n value: \"autodetect\"\n # Enable VXLAN\n - + name: CALICO_IPV4POOL_VXLAN\n value: \"Always\"\n # Set + MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config\n key: veth_mtu\n # Set MTU for the + VXLAN tunnel device.\n - name: FELIX_VXLANMTU\n valueFrom:\n + \ configMapKeyRef:\n name: calico-config\n key: + veth_mtu\n # Set MTU for the Wireguard tunnel device.\n - + name: FELIX_WIREGUARDMTU\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config\n key: veth_mtu\n # + The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # + chosen from this range. Changing this value after installation will have\n # + no effect. 
This should fall within `--cluster-cidr`.\n # - name: CALICO_IPV4POOL_CIDR\n + \ # value: \"192.168.0.0/16\"\n # Disable file logging + so `kubectl logs` works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: + \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n + \ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n + \ # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n + \ value: \"false\"\n - name: FELIX_FEATUREDETECTOVERRIDE\n + \ value: \"ChecksumOffloadBroken=true\"\n - name: FELIX_HEALTHENABLED\n + \ value: \"true\"\n securityContext:\n privileged: + true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n + \ exec:\n command:\n - /bin/calico-node\n + \ - -felix-live\n periodSeconds: 10\n initialDelaySeconds: + 10\n failureThreshold: 6\n readinessProbe:\n exec:\n + \ command:\n - /bin/calico-node\n - + -felix-ready\n periodSeconds: 10\n volumeMounts:\n - + mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n readOnly: + false\n - mountPath: /lib/modules\n name: lib-modules\n + \ readOnly: true\n - mountPath: /run/xtables.lock\n name: + xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n + \ name: var-run-calico\n readOnly: false\n - + mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: + false\n - name: policysync\n mountPath: /var/run/nodeagent\n + \ # For eBPF mode, we need to be able to mount the BPF filesystem at + /sys/fs/bpf so we mount in the\n # parent directory.\n - + name: sysfs\n mountPath: /sys/fs/\n # Bidirectional + means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to + the host.\n # If the host is known to mount that filesystem already + then Bidirectional can be omitted.\n mountPropagation: Bidirectional\n + \ - name: cni-log-dir\n mountPath: /var/log/calico/cni\n + \ readOnly: true\n volumes:\n # Used by calico-node.\n + \ - name: lib-modules\n hostPath:\n path: /lib/modules\n + \ - name: var-run-calico\n hostPath:\n path: /var/run/calico\n + \ - name: 
var-lib-calico\n hostPath:\n path: /var/lib/calico\n + \ - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n + \ type: FileOrCreate\n - name: sysfs\n hostPath:\n path: + /sys/fs/\n type: DirectoryOrCreate\n # Used to install CNI.\n + \ - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n + \ - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n + \ # Used to access CNI logs.\n - name: cni-log-dir\n hostPath:\n + \ path: /var/log/calico/cni\n # Mount in the directory for host-local + IPAM allocations. This is\n # used when upgrading from host-local to calico-ipam, + and can be removed\n # if not using the upgrade-ipam init container.\n + \ - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n + \ # Used to create per-pod Unix Domain Sockets\n - name: policysync\n + \ hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n + \ # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n + \ hostPath:\n type: DirectoryOrCreate\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n---\n\napiVersion: + v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n\n---\n# + Source: calico/templates/calico-kube-controllers.yaml\n# See https://github.com/projectcalico/kube-controllers\napiVersion: + apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: + kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers + can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n + \ k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n + \ metadata:\n name: calico-kube-controllers\n namespace: kube-system\n + \ labels:\n k8s-app: calico-kube-controllers\n spec:\n nodeSelector:\n + \ kubernetes.io/os: linux\n tolerations:\n # Mark the pod as + a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: + Exists\n - key: node-role.kubernetes.io/master\n effect: NoSchedule\n + \ 
serviceAccountName: calico-kube-controllers\n priorityClassName: system-cluster-critical\n + \ containers:\n - name: calico-kube-controllers\n image: calico/kube-controllers:v3.20.0\n + \ env:\n # Choose which controllers to run.\n - + name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n + \ value: kubernetes\n livenessProbe:\n exec:\n + \ command:\n - /usr/bin/check-status\n - + -l\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: + 6\n timeoutSeconds: 10\n readinessProbe:\n exec:\n + \ command:\n - /usr/bin/check-status\n - + -r\n periodSeconds: 10\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n + \ name: calico-kube-controllers\n namespace: kube-system\n\n---\n\n# This manifest + creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler + to evict\n\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n + \ name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: + calico-kube-controllers\nspec:\n maxUnavailable: 1\n selector:\n matchLabels:\n + \ k8s-app: calico-kube-controllers\n---\n# Source: calico/templates/calico-etcd-secrets.yaml\n\n---\n# + Source: calico/templates/calico-typha.yaml\n\n---\n# Source: calico/templates/configure-canal.yaml\n" +kind: ConfigMap +metadata: + creationTimestamp: null + name: calico-crs-configmap + namespace: default diff --git a/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs.yaml b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs.yaml new file mode 100644 index 0000000000..acfe874639 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/calico-crs.yaml @@ -0,0 +1,13 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha3 +kind: ClusterResourceSet +metadata: + name: calico-crs + namespace: default +spec: + clusterSelector: + matchLabels: + cni: calico + resources: + - kind: ConfigMap + name: calico-crs-configmap + diff --git 
a/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml new file mode 100644 index 0000000000..a0182960b8 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml @@ -0,0 +1,37 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: ClusterBootstrapConfig +metadata: + name: capi-gitops + namespace: default +spec: + clusterSelector: + matchLabels: + weave.works/capi: bootstrap + jobTemplate: + generateName: "run-gitops-{{ .ObjectMeta.Name }}" + spec: + containers: + - image: ghcr.io/fluxcd/flux-cli:v0.29.5 + name: flux-bootstrap + resources: {} + volumeMounts: + - name: kubeconfig + mountPath: "/etc/gitops" + readOnly: true + args: + [ + "bootstrap", + "github", + "--kubeconfig=/etc/gitops/value", + "--owner=$GITHUB_USER", + "--repository=fleet-infra", + "--path=./clusters/{{ .ObjectMeta.Namespace }}/{{ .ObjectMeta.Name }}", + ] + envFrom: + - secretRef: + name: my-pat + restartPolicy: Never + volumes: + - name: kubeconfig + secret: + secretName: "{{ .ObjectMeta.Name }}-kubeconfig" diff --git a/website/versioned_docs/version-0.12.0/cluster-management/assets/profiles/profile-repo.yaml b/website/versioned_docs/version-0.12.0/cluster-management/assets/profiles/profile-repo.yaml new file mode 100644 index 0000000000..dfd989d091 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/assets/profiles/profile-repo.yaml @@ -0,0 +1,10 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + creationTimestamp: null + name: weaveworks-charts + namespace: flux-system +spec: + interval: 1m + url: https://my-org.github.io/profiles +status: {} diff --git a/website/versioned_docs/version-0.12.0/cluster-management/assets/rbac/wego-admin.yaml 
b/website/versioned_docs/version-0.12.0/cluster-management/assets/rbac/wego-admin.yaml new file mode 100644 index 0000000000..54fdc43f79 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/assets/rbac/wego-admin.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-admin-cluster-role-binding +subjects: + - kind: User + name: wego-admin + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-admin-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-admin-cluster-role +rules: + - apiGroups: [""] + resources: ["secrets", "pods"] + verbs: ["get", "list"] + - apiGroups: ["apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list"] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: ["kustomizations"] + verbs: ["get", "list", "patch"] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: ["helmreleases"] + verbs: ["get", "list", "patch"] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: ["get", "list", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - apiGroups: ["pac.weave.works"] + resources: ["policies"] + verbs: ["get", "list"] diff --git a/website/versioned_docs/version-0.12.0/cluster-management/cluster-api-providers.mdx b/website/versioned_docs/version-0.12.0/cluster-management/cluster-api-providers.mdx new file mode 100644 index 0000000000..b11a6e5c08 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/cluster-api-providers.mdx @@ -0,0 +1,40 @@ +--- +title: Cluster API Providers +sidebar_position: 2 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +## Creating leaf clusters + +To enable leaf cluster creation, Weave GitOps leverages the Cluster-API (CAPI) providers for [AWS](https://cluster-api-aws.sigs.k8s.io/getting-started.html) or [Docker](https://cluster-api.sigs.k8s.io/user/quick-start.html). +In this section we cover the steps to deploy the providers on a Kubernetes cluster that is running the Weave GitOps. + +CAPI provides declarative APIs, controllers, and tooling to manage the lifecycle of Kubernetes clusters, across +a large number of [infrastructure providers](https://cluster-api.sigs.k8s.io/reference/providers.html#infrastructure). +The CAPI custom resource definitions are platform independent as each provider implementation handles the creation of VMs, +VPCs, networks and other required infrastructure parts, enabling consistent and repeatable cluster deployments. +For more information on the CAPI project, refer to the [CAPI book](https://cluster-api.sigs.k8s.io/introduction.html). + +## Configure and deploy the CAPI providers + +In all cases, CAPI requires kubectl access to an existing Kubernetes cluster, so in our case we configure `kubectl` to use the management cluster. + +```bash +export KUBECONFIG=/path/to/kubeconfig +``` + +## AWS provider (CAPA) + +After having configured `kubectl`, to deploy the CAPA components, follow the steps at https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-clusterctl + +## Docker provider (CAPD) + +The Docker infrastructure provider is a reference implementation and is a practical way of testing the Weave GitOps cluster creation feature. It is not intended for production clusters. As CAPD will start docker containers in the host nodes of the management cluster, note that if you are using it with a `kind` cluster you'll need to mount the docker socket as described in the [Install and/or configure a kubernetes cluster](https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-andor-configure-a-kubernetes-cluster) kind section. 
+ +Similar to the AWS provider case, configure `kubectl` to use the management cluster, and to deploy the CAPD components follow the steps at https://cluster-api-aws.sigs.k8s.io/getting-started.html#install-clusterctl. diff --git a/website/versioned_docs/version-0.12.0/cluster-management/deleting-a-cluster.mdx b/website/versioned_docs/version-0.12.0/cluster-management/deleting-a-cluster.mdx new file mode 100644 index 0000000000..02bf0f8074 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/deleting-a-cluster.mdx @@ -0,0 +1,22 @@ +--- +title: Deleting a Cluster +hide_title: true +sidebar_position: 5 +--- + +import TierLabel from "../_components/TierLabel"; + +# Deleting a Cluster + +### How to: delete a cluster using UI + +- Select the cluster clusters you want to delete +- Press `CREATE A PR TO DELETE CLUSTERS` button +- Update the deletion PR values or leave the default values +- Press `Remove clusters` button +- Merge the create PR for clusters deletion + +### Notes + +A current limitation is the inability to apply an _empty_ repository to a cluster. If you have capi clusters and other manifests committed to this repository, and then _delete all of them_ so there are 0 manifests left, then the apply will fail and the resources will not be removed from the cluster. +A workaround is to add a dummy _ConfigMap_ back to the git repository after deleting everything else so that there is at least 1 manifest to apply. 
diff --git a/website/versioned_docs/version-0.12.0/cluster-management/getting-started.mdx b/website/versioned_docs/version-0.12.0/cluster-management/getting-started.mdx new file mode 100644 index 0000000000..ea7ec8533e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/getting-started.mdx @@ -0,0 +1,253 @@ +--- +title: Getting started +sidebar_position: 1 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Getting started + +## Creating your first CAPD Cluster + +If you've followed the [Installation guide](installation/weave-gitops-enterprise.mdx) you should have: + +1. Weave GitOps Enterprise installed +2. A CAPI provider installed (With support for `ClusterResourceSet`s enabled). + +Next up we'll add a template and use it to create a cluster. + +### Directory structure + +Let's setup a directory structure to manage our clusters + +```bash +mkdir -p clusters/bases \ + clusters/management/capi/templates \ + clusters/management/capi/bootstrap \ + clusters/management/capi/profiles +``` + +Now we should have: + +```bash +. +└── clusters + ├── bases + └── management + └── capi + ├── bootstrap + ├── profiles + └── templates +``` + +This assumes that we've configured flux to reconcile everything in `clusters/management` into our management cluster. + +To keep things organized we've created some subpaths for the different resources: + +- `bases` for any common resources between clusters like RBAC and policy. +- `templates` for `CAPITemplates` +- `bootstrap` for `ClusterBootstrapConfig`, `ClusterResourceSet` and the `ConfigMap` they reference +- `profiles` for the `HelmRepository` of the profiles for the newly created clusters + +Lets grab some sample resources to create our first cluster! 
+ +### Add common RBAC to the repo + +When a cluster is provisioned, by default it will reconcile all the manifests in `./clusters//` and `./clusters/bases`. + +To display Applications and Sources in the UI we need to give the logged in user permissions to inspect the new cluster. + +Adding common rbac rules to `./clusters/bases/rbac` is an easy way to configure this! + +import WegoAdmin from "!!raw-loader!./assets/rbac/wego-admin.yaml"; + + + {() => ( + + curl -o clusters/bases/rbac/wego-admin.yaml {window.location.protocol}// + {window.location.host} + {require("./assets/rbac/wego-admin.yaml").default} + + )} + + + + {WegoAdmin} + + +### Add a template + +See [CAPI Templates](../gitops-templates/templates.mdx) page for more details on this topic. Once we load a template we can use it in the UI to create clusters! + +import CapdTemplate from "!!raw-loader!../assets/templates/capd-template.yaml"; + +Download the template below to your config repository path, then commit and push to your git origin. + + + {() => ( + + curl -o clusters/management/capi/templates/capd-template.yaml{" "} + {window.location.protocol}//{window.location.host} + {require("../assets/templates/capd-template.yaml").default} + + )} + + + + {CapdTemplate} + + +## Automatically install a CNI with `ClusterResourceSet`s + +We can use `ClusterResourceSet`s to automatically install CNI's on a new cluster, here we use calico as an example. 
+ +### Add a CRS to install a CNI + +Create a calico configmap and a CRS as follows: + +import CalicoCRS from "!!raw-loader!./assets/bootstrap/calico-crs.yaml"; + + + {() => ( + + curl -o clusters/management/capi/bootstrap/calico-crs.yaml{" "} + {window.location.protocol}//{window.location.host} + {require("./assets/bootstrap/calico-crs.yaml").default} + {"\n"} + curl -o clusters/management/capi/bootstrap/calico-crs-configmap.yaml { + window.location.protocol + }//{window.location.host} + {require("./assets/bootstrap/calico-crs-configmap.yaml").default} + + )} + + + + {CalicoCRS} + + +The full [`calico-crs-configmap.yaml`](./assets/bootstrap/calico-crs-configmap.yaml) is a bit large to display inline here but make sure to download it to `clusters/management/capi/bootstrap/calico-crs-configmap.yaml` too, manually or with the above `curl` command. + +## Profiles and clusters + +WGE can automatically install profiles onto new clusters + +#### Add a helmrepo + +import ProfileRepo from "!!raw-loader!./assets/profiles/profile-repo.yaml"; + +A profile is an enhanced helm chart. When publishing profiles to helm repositories make sure to include the `weave.works/profile` in `Chart.yaml`. These annotated profiles will appear in WGE + +``` +annotations: + weave.works/profile: nginx-profile +``` + +Download the profile repository below to your config repository path then commit and push. Make sure to update the url to point to a Helm repository containing your profiles. 
+ + + {() => ( + + curl -o clusters/management/capi/profiles/profile-repo.yaml{" "} + {window.location.protocol} + //{window.location.host} + {require("./assets/profiles/profile-repo.yaml").default} + + )} + + + + {ProfileRepo} + + +#### Add a cluster bootstrap config + +Create a cluster bootstrap config as follows: + +```bash + kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN +``` + +import CapiGitopsCDC from "!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml"; + +Download the config with + + + {() => ( + + curl -o + clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml{" "} + {window.location.protocol} + //{window.location.host} + { + require("./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml") + .default + } + + )} + + +Then update the `GITOPS_REPO` variable to point to your cluster + + + {CapiGitopsCDC} + + +#### Add Monitoring Dashboards to your cluster + +In order to add dashboards to your cluster, you'll need to use metadata annotations following the below pattern. 
+ +``` +apiVersion: gitops.weave.works/v1alpha1 +kind: GitopsCluster +metadata: + annotations: + metadata.weave.works/dashboard.grafana: https://grafana.com/ + metadata.weave.works/dashboard.prometheus: https://prometheus.io/ +``` + +#### Specifying CAPI cluster kinds + +To be able to explicitly specify the type of cluster, you need to use metadata annotations using `weave.works/cluster-kind` for the annotation key as the below pattern: + +``` +apiVersion: gitops.weave.works/v1alpha1 +kind: GitopsCluster +metadata: + annotations: + weave.works/cluster-kind: +``` +where the **CLUSTER_KIND** can be one of the following supported ones: + - DockerCluster + - AWSCluster + - AWSManagedCluster + - AzureCluster + - AzureManagedCluster + - GCPCluster + - MicrovmCluster + - Rancher + - Openshift + - Tanzu + - OtherOnprem + +## Test + +You should now be able to create a new cluster from your template and install profiles onto it with a single Pull Request via the WGE UI! diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-btn.png b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-btn.png new file mode 100644 index 0000000000..e4efad3c97 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-btn.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-form.png b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-form.png new file mode 100644 index 0000000000..35abd63d05 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-form.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-helm-release.png b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-helm-release.png new file mode 100644 index 0000000000..3405d63876 Binary files /dev/null and 
b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-helm-release.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-kustomization.png b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-kustomization.png new file mode 100644 index 0000000000..fdd1fab580 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-kustomization.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-select-source.png b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-select-source.png new file mode 100644 index 0000000000..ce3998e4bc Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/add-application-select-source.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/disconnect-cluster.png b/website/versioned_docs/version-0.12.0/cluster-management/img/disconnect-cluster.png new file mode 100644 index 0000000000..5a08b5afbc Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/disconnect-cluster.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/identity-selection.png b/website/versioned_docs/version-0.12.0/cluster-management/img/identity-selection.png new file mode 100644 index 0000000000..c1ca94f155 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/identity-selection.png differ diff --git a/website/versioned_docs/version-0.12.0/cluster-management/img/profile-selection.png b/website/versioned_docs/version-0.12.0/cluster-management/img/profile-selection.png new file mode 100644 index 0000000000..4f0243b070 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/cluster-management/img/profile-selection.png differ diff --git 
a/website/versioned_docs/version-0.12.0/cluster-management/intro.mdx b/website/versioned_docs/version-0.12.0/cluster-management/intro.mdx new file mode 100644 index 0000000000..42c4430d56 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/intro.mdx @@ -0,0 +1,15 @@ +--- +title: Introduction +sidebar_position: 0 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +## Cluster management + +Weave GitOps Enterprise (WGE) can provision Kubernetes clusters using any of the CAPI providers installed. The lifecycle management of these clusters is done declaratively via GitOps and WGE simplifies this process by providing both a Web UI and a CLI to interact with and manage these clusters. \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/cluster-management/managing-existing-clusters.mdx b/website/versioned_docs/version-0.12.0/cluster-management/managing-existing-clusters.mdx new file mode 100644 index 0000000000..e3fb4bb0b5 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/managing-existing-clusters.mdx @@ -0,0 +1,275 @@ +--- +title: Managing existing clusters +hide_title: true +sidebar_position: 2 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import TierLabel from "../_components/TierLabel"; + +# Managing existing clusters + +### Managing non-capi clusters {#how-to-connect-a-cluster} + +Any kubernetes cluster whether capi or not can be added to Weave Gitops Enterprise. The only thing we need is a secret containing a valid `kubeconfig`. + +import TOCInline from "@theme/TOCInline"; +; + + + + +If you already have a `kubeconfig` stored in a secret in your management cluster, continue below to create a `GitopsCluster`. + +If you have a kubeconfig, you can load in into the cluster like so: + +``` +kubectl create secret generic demo-01-kubeconfig \ +--from-file=value=./demo-01-kubeconfig +``` + + + + +### How to create a kubeconfig secret using a service account + +1. Create a new service account on the remote cluster: + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: demo-01 + namespace: default +``` + +2. 
Add RBAC permissions for the service account + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: impersonate-user-groups +subjects: + - kind: ServiceAccount + name: demo-01 + namespace: default +roleRef: + kind: ClusterRole + name: user-groups-impersonator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: user-groups-impersonator +rules: + - apiGroups: [""] + resources: ["users", "groups"] + verbs: ["impersonate"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] +``` + +This will allow WGE to introspect the cluster for available namespaces. + +Once we know what namespaces are available we can test whether the logged in user can access them via impersonation. + +3. Get the token of the service account + +First get the list of secrets of the service accounts by running the following command: + +```sh +kubectl get secrets --field-selector type=kubernetes.io/service-account-token +NAME TYPE DATA AGE +default-token-lsjz4 kubernetes.io/service-account-token 3 13d +demo-01-token-gqz7p kubernetes.io/service-account-token 3 99m +``` + +`demo-01-token-gqz7p` is the secret that holds the token for `demo-01` service account + +To get the token of the service account run the following command: + +```sh +TOKEN=$(kubectl get secret demo-01-token-gqz7p -o jsonpath={.data.token} | base64 -d) +``` + +4. 
Create a kubeconfig secret + +We'll use a helper script to generate the kubeconfig, save this into `static-kubeconfig.sh`: + +```bash title="static-kubeconfig.sh" +#!/bin/bash + +if [[ -z "$CLUSTER_NAME" ]]; then + echo "Ensure CLUSTER_NAME has been set" + exit 1 +fi + +if [[ -z "$CA_CERTIFICATE" ]]; then + echo "Ensure CA_CERTIFICATE has been set to the path of the CA certificate" + exit 1 +fi + +if [[ -z "$ENDPOINT" ]]; then + echo "Ensure ENDPOINT has been set" + exit 1 +fi + +if [[ -z "$TOKEN" ]]; then + echo "Ensure TOKEN has been set" + exit 1 +fi + +export CLUSTER_CA_CERTIFICATE=$(cat "$CA_CERTIFICATE" | base64) + +envsubst <Details->Endpoint->”Show cluster certificate”. You will need to copy the contents of the certificate into the `ca.crt` file used below. + +```sh +CLUSTER_NAME=demo-01 \ +CA_CERTIFICATE=ca.crt \ +ENDPOINT= \ +TOKEN= ./static-kubeconfig.sh > demo-01-kubeconfig +``` + +Replace the following: + +- CLUSTER_NAME: the name of your cluster i.e. `demo-01` +- ENDPOINT: the API server endpoint i.e. `34.218.72.31` +- CA_CERTIFICATE: path to the CA certificate file of the cluster +- TOKEN: the token of the service account retrieved in the previous step + +Finally create a secret for the generated kubeconfig: + +```sh +kubectl create secret generic demo-01-kubeconfig \ +--from-file=value=./demo-01-kubeconfig +``` + + + + +### Connect a cluster + +:::note Get started first! + +Make sure you've + +1. Added some common RBAC rules into the `clusters/bases` folder, as described in [Getting started](./getting-started.mdx). +2. Configured the cluster bootstrap controller as described in [Getting started](./getting-started.mdx). + +::: + +Create a `GitopsCluster` + +```yaml title="./clusters/management/clusters/demo-01.yaml" +apiVersion: gitops.weave.works/v1alpha1 +kind: GitopsCluster +metadata: + name: demo-01 + namespace: default + # Signals that this cluster should be bootstrapped. 
+ labels: + weave.works/capi: bootstrap +spec: + secretRef: + name: demo-01-kubeconfig +``` + +When the `GitopsCluster` appears in the cluster, the Cluster Bootstrap Controller will install flux on it and by default start reconciling the `./clusters/demo-01` path in your management cluster's git repository. To inspect the Applications and Sources running on the new cluster we need to give permissions to the user accessing the UI. Common RBAC rules like this should be stored in `./clusters/bases`. Here we create a kustomziation to add these common resources onto our new cluster: + +```yaml title="./clusters/demo-01/clusters-bases-kustomization.yaml" +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + creationTimestamp: null + name: clusters-bases-kustomization + namespace: flux-system +spec: + interval: 10m0s + path: clusters/bases + prune: true + sourceRef: + kind: GitRepository + name: flux-system +``` + +Save these 2 files into your git repository. Commit and push. + +Once flux has reconciled the cluster you can inspect your flux resources via the UI! + +## Debugging + +### How to test a kubeconfig secret in a cluster + +To test a kubeconfig secret has been correctly setup apply the following manifest and check the logs after the job completes: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: kubectl +spec: + ttlSecondsAfterFinished: 30 + template: + spec: + containers: + - name: kubectl + image: bitnami/kubectl + args: + [ + "get", + "pods", + "-n", + "kube-system", + "--kubeconfig", + "/etc/kubeconfig/value", + ] + volumeMounts: + - name: kubeconfig + mountPath: "/etc/kubeconfig" + readOnly: true + restartPolicy: Never + volumes: + - name: kubeconfig + secret: + secretName: demo-01-kubeconfig + optional: false +``` + +In the manifest above `demo-01-kubeconfig`is the name of the secret that contains the kubeconfig for the remote cluster. 
+ +--- + +# Background + +- [Authentication strategies](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies) + - [X509 client certificates](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#x509-client-certs): can be used across different namespaces + - [Service account tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens): limited to a single namespace +- [Kubernetes authentication 101 (CNCF blog post)](https://www.cncf.io/blog/2020/07/31/kubernetes-rbac-101-authentication/) +- [Kubernetes authentication (Magalix blog post)](https://www.magalix.com/blog/kubernetes-authentication) diff --git a/website/versioned_docs/version-0.12.0/cluster-management/profiles.mdx b/website/versioned_docs/version-0.12.0/cluster-management/profiles.mdx new file mode 100644 index 0000000000..dcd9e73390 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/profiles.mdx @@ -0,0 +1,102 @@ +--- +title: Profiles +sidebar_position: 6 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Profiles + +:::note BEFORE YOU START +The following instructions require you to make minor changes to the content of your own hosted Helm repository. +::: + +When provisioning new clusters it is often useful to install selected software packages to them as part of their bootstrap process. Weave GitOps Enterprise enables this by installing standard Helm charts to the newly provisioned clusters. This feature lowers the ongoing operational overhead and allows for the clusters to be immediately usable after being provisioned. To set this up you need to: + +1. Annotate a Helm chart to make it available for installation +2. Select which profiles you want installed when creating a cluster + +### 1. 
Annotate a Helm chart to make it available for installation + +In order for a chart to become available for installation, it needs to include a `weave.works/profile` annotation. For example: + +```yaml title="Chart.yaml" +annotations: + weave.works/profile: observability-profile +apiVersion: v1 +appVersion: 1.0.0 +description: Observability Helm chart for Kubernetes +home: https://github.com/weaveworks/observability-profile +kubeVersion: ">=1.19.0-0" +name: observability +sources: + - https://github.com/weaveworks/observability-profile +version: 1.0.0 +``` + +The annotation value is not important and can be left blank i.e. `""`. Helm charts with the `weave.works/profile` annotation are called _Profiles_. + +Annotations can also be used to determine the order in which profiles will be installed. + +``` +annotations: + weave.works/profile: observability-profile + weave.works/layer: layer-0 +``` + +``` +annotations: + weave.works/profile: podinfo-profile + weave.works/layer: layer-1 +``` + +The profiles will be sorted lexicographically by their layer and those at a higher layer will only be installed after lower layers have been successfully installed and started. + +In this example, `observability-profile` will be installed prior to `podinfo-profile`. In the corresponding HelmReleases, the dependencies can be observed under the `dependsOn` field. + +``` +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + labels: + weave.works/applied-layer: layer-0 + name: cluster-name-observability + namespace: wego-system +... +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + labels: + weave.works/applied-layer: layer-1 + name: cluster-name-podinfo + namespace: wego-system +spec: +... + dependsOn: + - name: cluster-name-observability +... +``` + +### 2. 
Select which profiles you want installed when creating a cluster + +Currently WGE inspects the current namespace that it is deployed in (in the management cluster) for a `HelmRepository` object named `weaveworks-charts`. This Kubernetes object should be pointing to a Helm chart repository that includes the profiles that are available for installation. + +When creating a cluster from the UI using a CAPI template, these profiles should be available for selection in the `Profiles` section of the template. For example: + +![Profiles Selection](./img/profile-selection.png) + +As shown above, some profiles will be optional whereas some profiles will be required. This is determined when the template is authored and allows for operation teams to control which Helm packages should be installed on new clusters by default. + +To allow editing of the yaml values for required profiles, the `editable` flag can be added in the annotation describing the required profile in the template. For example: + +``` +apiVersion: capi.weave.works/v1alpha1 +kind: CAPITemplate +metadata: + name: connect-a-cluster-with-policies + namespace: default + annotations: + capi.weave.works/profile-0: '{"name": "weave-policy-agent", "editable": true, "version": "0.2.8", "values": "accountId: weaveworks\nclusterId: ${CLUSTER_NAME}" }' +``` diff --git a/website/versioned_docs/version-0.12.0/cluster-management/provider-identities.mdx b/website/versioned_docs/version-0.12.0/cluster-management/provider-identities.mdx new file mode 100644 index 0000000000..47f2d78539 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/cluster-management/provider-identities.mdx @@ -0,0 +1,82 @@ +--- +title: CAPI Provider Identities +hide_title: true +sidebar_position: 4 +--- + +import TierLabel from "../_components/TierLabel"; + +# CAPI Provider Identities + +## Multi-tenancy + +Some Cluster API providers allow you to choose the account or identity that the new cluster will be created with. 
This is often referred to as _Multi-tenancy_ in the CAPI world. Weave GitOps currently supports: + +- [**AWS** multi-tenancy](https://cluster-api-aws.sigs.k8s.io/topics/multitenancy.html) +- [**Azure** multi-tenancy](https://capz.sigs.k8s.io/topics/multitenancy.html) +- [**vSphere** multi-tenancy](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/master/docs/identity_management.md) + +### Identities and templates + +Our _templates_ describe the properties of the cluster, how many nodes, what version of Kubernetes etc, while the _identity_ is which account will be used to create the cluster. So given in our cluster we have the template: + +```yaml +apiVersion: capi.weave.works/v1alpha1 +kind: CAPITemplate +metadata: + name: capa-cluster-template +spec: + resourcetemplates: + - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AWSCluster + metadata: + name: "${CLUSTER_NAME}" + spec: + region: "${AWS_REGION}" +``` + +and the identity + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: AWSClusterStaticIdentity +metadata: + name: "test-account" +spec: + secretRef: + name: test-account-creds + namespace: capa-system + allowedNamespaces: + selector: + matchLabels: + cluster.x-k8s.io/ns: "testlabel" +``` + +We can ask Weave GitOps to use the `test-account` when creating the cluster by using the _Infrastructure provider credentials_ dropdown on the _Create new cluster with template_ page: + +![Identity Selection](./img/identity-selection.png) + +The resulting definition will have the identity injected into the appropriate place in the template, for this example: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AWSCluster +metadata: + name: example-cluster +spec: + region: eu-north-1 + identityRef: + kind: AWSClusterStaticIdentity + name: test-account +``` + +### `identityRef`s + +The supported providers implement multi-tenancy by setting an `identityRef` on the provider cluster object, e.g. 
`AWSCluster`, `AzureCluster` or `VSphereCluster`. + +Weave GitOps will search _all namespaces_ in the cluster for potential identities that can be used to create a cluster. The following identity `kind`s are currently supported and their corresponding Cluster kinds: + +- `AWSClusterStaticIdentity`: `AWSCluster` +- `AWSClusterRoleIdentity`: `AWSCluster` +- `AzureClusterIdentity`: `AzureCluster` +- `VSphereClusterIdentity`: `VSphereCluster` diff --git a/website/versioned_docs/version-0.12.0/configuration/_category_.json b/website/versioned_docs/version-0.12.0/configuration/_category_.json new file mode 100644 index 0000000000..918fb7cfe5 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Configuration", + "position": 3 +} diff --git a/website/versioned_docs/version-0.12.0/configuration/recommended-rbac-configuration.mdx b/website/versioned_docs/version-0.12.0/configuration/recommended-rbac-configuration.mdx new file mode 100644 index 0000000000..3c7e8b7827 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/recommended-rbac-configuration.mdx @@ -0,0 +1,179 @@ +--- +title: Recommended RBAC Configuration +sidebar_position: 0 +--- + +This page summarises the contents of the [securing access to the dashboard](securing-access-to-the-dashboard.mdx), +[service account permissions](service-account-permissions.mdx) and [user permissions](user-permissions.mdx). They should be +read in addition to this page in order to understand the suggestions made here and +their ramifications. + +This page is purposefully vague as the intention is to give a broad idea of how +such a system could be implemented, not the specifics as that will be dependent +on your specific circumstances and goals. + +:::note Gitops-Core +These recommendations are for Weave GitOps Core. You can find recommendations for +Weave GitOps Enterprise [here](../enterprise/user-permissions.mdx). 
+::: + +## Summary + +The general recommendation is to use OIDC and a small number of groups that +Weave GitOps can impersonate. + +OIDC is the recommended method for managing authentication as it decouples the +need to manage user lists from the application, allowing it to be managed via +a central system designed for that purpose (i.e. the OIDC provider). OIDC also +enables the creation of groups (either via your provider's own systems or by +using a connector like [Dex](../guides/setting-up-dex.md)). + +Configuring Weave GitOps to impersonate kubernetes groups rather than +users has the following benefits: +* A user's permissions for impersonation by Weave GitOps can be separate from + any other permissions that they may or may not have within the cluster. +* Users do not have to be individually managed within the cluster and can have + their permissions managed together. + +## Example set up + +Assume that your company has the following people in OIDC +* Aisha, a cluster admin, who should have full admin access to Weave GitOps +* Brian, lead of team-A, who should have admin permissions to their team's + namespace in Weave GitOps and readonly-otherwise +* June and Jo, developers in team-A who should have read-only access to Weave GitOps. + +You could then create 3 groups: + +* `wego-admin` + - Bound to the `ClusterRole`, created by Helm, `wego-admin-cluster-role` + - Aisha is the only member +* `wego-team-a-admin` + - Bound to a `Role`, using the same permissions as `wego-admin-role`, created + in Team's namespace + - Brian and Aisha are members +* `wego-readonly` + - Bound to a `ClusterRole` that matches `wego-admin-cluster-role` but with + no `patch` permissions. 
+ - Aisha, Brian, June & Jo are all members + +The Weave GitOps service account can then be configured with: +```yaml +rbac: + impersonationResourceNames: ["wego-admin", "wego-team-a-admin", "wego-readonly"] + impersonationResources: ["groups"] +``` +so that only these three groups can be `impersonated` by the service account. + +:::caution Using OIDC for cluster and Weave GitOps Authentication +If the same OIDC provider is used to authenticate a user with the cluster +itself (e.g. for use with `kubectl`) and to Weave GitOps then, depending +on OIDC configuration, they may end up with the super-set of their permissions +from Weave GitOps and any other permissions granted to them. + +This can lead to un-intended consequences (e.g. viewing `secrets`). To avoid +this OIDC providers will often let you configure which groups are returned +to which clients: the Weave GitOps groups should not be returned to the +cluster client (and visa versa). +::: + +### Code + +The yaml to configure these permissions would look roughly like: +```yaml +# Admin cluster role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-admin-cluster-role +rules: + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +--- +# Read only cluster role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: wego-readonly-role +rules: + # All the 'patch' 
permissions have been removed + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list" ] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +--- +# Bind the cluster admin role to the wego-admin group +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-cluster-admin +subjects: +- kind: Group + name: wego-admin # only Aisha is a member + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-admin-cluster-role + apiGroup: rbac.authorization.k8s.io +--- +# Bind the admin role in the team-a namespace for the wego-team-a-admin group +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: wego-team-a-admin-role + namespace: team-a +subjects: +- kind: Group + name: wego-team-a-admin # Aisha & Brian are members + apiGroup: rbac.authorization.k8s.io +roleRef: + # Use the cluster role to set rules, just bind them in the team-a namespace + kind: ClusterRole + name: wego-admin-role + apiGroup: rbac.authorization.k8s.io +--- +# Bind the readonly role to the readonly group +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: wego-readonly-role +subjects: +- kind: Group + name: wego-readonly # Everyone is a member + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: wego-readonly-role + apiGroup: rbac.authorization.k8s.io +--- +``` diff --git 
a/website/versioned_docs/version-0.12.0/configuration/securing-access-to-the-dashboard.mdx b/website/versioned_docs/version-0.12.0/configuration/securing-access-to-the-dashboard.mdx new file mode 100644 index 0000000000..3f6b01ea2d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/securing-access-to-the-dashboard.mdx @@ -0,0 +1,71 @@ +--- +title: Securing access to the dashboard +sidebar_position: 1 +--- + +## Dashboard Login + +There are 2 supported methods for logging in to the dashboard: +- Login via an OIDC provider +- Login via a cluster user account + +The recommended method is to integrate with an OIDC provider, as this will let you control permissions for existing users and groups that have already been configured to use OIDC. However, it is also possible to use a cluster user account to login, if an OIDC provider is not available to use. Both methods work with standard Kubernetes RBAC. + +## Login via an OIDC provider + +You may decide to give your engineering teams access to the dashboard, in order to view and manage their workloads. In this case, you will want to secure access to the dashboard and restrict who can interact with it. Weave GitOps integrates with your OIDC provider and uses standard Kubernetes RBAC to give you fine-grained control of the permissions for the dashboard users. + +#### Background + +OIDC extends the OAuth2 authorization protocol by including an additional field (ID Token) that contains information (claims) about a user's identity. After a user successfully authenticates with the OIDC provider, this information is used by Weave GitOps to impersonate the user in any calls to the Kubernetes API. This allows cluster administrators to use RBAC rules to control access to the cluster and also the dashboard. + +#### Configuration + +In order to login via your OIDC provider, you need to create a Kubernetes secret to store the OIDC configuration. 
This configuration consists of the following parameters: + +| Parameter | Description | Default | +| ------------------| -------------------------------------------------------------------------------------------------------------------------------- | --------- | +| `issuerURL` | The URL of the issuer, typically the discovery URL without a path | | +| `clientID` | The client ID that has been setup for Weave GitOps in the issuer | | +| `clientSecret` | The client secret that has been setup for Weave GitOps in the issuer | | +| `redirectURL` | The redirect URL that has been setup for Weave GitOps in the issuer, typically the dashboard URL followed by `/oauth2/callback ` | | +| `tokenDuration` | The time duration that the ID Token will remain valid, after successful authentication | "1h0m0s" | + +Ensure that your OIDC provider has been setup with a client ID/secret and the redirect URL of the dashboard. + +Create a secret named `oidc-auth` in the `flux-system` namespace with these parameters set: + +```sh +kubectl create secret generic oidc-auth \ + --namespace flux-system \ + --from-literal=issuerURL= \ + --from-literal=clientID= \ + --from-literal=clientSecret= \ + --from-literal=redirectURL= \ + --from-literal=tokenDuration= +``` + +Once the HTTP server starts unauthenticated users will have to click the 'login with OIDC provider' to log in or use the cluster account (if configured). Upon successful authentication, the users' identity will be impersonated in any calls made to the Kubernetes API, as part of any action they take in the dashboard. By default the Helm chart will configure RBAC correctly but it is recommended to read the [service account](service-account-permissions.mdx) and [user](user-permissions.mdx) permissions pages to understand which actions are needed for Weave GitOps to function correctly. 
+ +## Login via a cluster user account + +Before you login via the cluster user account, you need to generate a bcrypt hash for your chosen password and store it as a secret in Kubernetes. There are several different ways to generate a bcrypt hash, this guide uses `gitops get bcrypt-hash` from our CLI: + +Generate the password by running: + +```sh +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q +``` + +Now create a Kubernetes secret to store your chosen username and the password hash: + +```sh +kubectl create secret generic cluster-user-auth \ + --namespace flux-system \ + --from-literal=username=admin \ + --from-literal=password='$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q' +``` + +You should now be able to login via the cluster user account using your chosen username and password. Follow the instructions in the next section in order to configure RBAC correctly. diff --git a/website/versioned_docs/version-0.12.0/configuration/service-account-permissions.mdx b/website/versioned_docs/version-0.12.0/configuration/service-account-permissions.mdx new file mode 100644 index 0000000000..ee6601be60 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/service-account-permissions.mdx @@ -0,0 +1,122 @@ +--- +title: Service Account permissions +sidebar_position: 2 +--- + +This is an explanation of the [kubernetes permissions](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +used by the Weave GitOps service account. 
This is the service account used by +the application itself (rather than the static user used for demo/emergency +access, the permissions for which are covered in the [static user permissions](user-permissions.mdx) +page) + +The default permissions of the service account are defined in the [helm chart](https://github.com/weaveworks/weave-gitops/tree/main/charts/gitops-server/templates/role.yaml) which +will generate a cluster role with the following permissions: + +```yaml +rules: +# Used to query the cluster +- apiGroups: [""] + resources: ["users", "groups"] # set by rbac.impersonationResources + verbs: [ "impersonate" ] + # resourceNames: [] # set by rbac.impersonationResourceNames +# Used to get OIDC/static user credentials for login +- apiGroups: [""] + resources: [ "secrets" ] + verbs: [ "get", "list" ] + resourceNames: # set by rbac.viewSecretsResourceNames + - "cluster-user-auth" + - "oidc-auth" +# The service account needs to read namespaces to know where it can query +- apiGroups: [ "" ] + resources: [ "namespaces" ] + verbs: [ "get", "list" ] +``` + +These allow the pod to do three things: +* [impersonate](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation) the user and operate in cluster as them +* read the available namespaces (this is required to understand the users' permissions) +* read the `cluster-user-auth` and `oidc-auth` secrets, which are the default secrets + to store the cluster-user account and OIDC configuration (see + [securing access to the dashboard](securing-access-to-the-dashboard.mdx)) + +## The Helm values + +| Value | Description | Default | +|-----------------------------------|---------------------------------------------------------------------|--------------------------------------| +| `rbac.impersonationResources` | Which resource types the service account can impersonate | `["users", "groups"]` | +| `rbac.impersonationResourceNames` | Specific users, groups or services account that can 
be impersonated | `[]` | +| `rbac.viewSecretsResourceNames` | Specific secrets that can be read | `["cluster-user-auth", "oidc-auth"]` | + + +## Impersonation + +The primary way Weave GitOps queries the Kube API is via `impersonation`, the +application (not the cluster) authenticates the user (either via the static +cluster-user credentials or OIDC) then makes calls to the Kube API on the user's +behalf. This is equivalent to making a kubectl call like: + +```bash +$ kubectl get deployments --as aisha@example.com +``` + +Assuming the user `aisha@example.com` has been granted permissions to get +deployments within the cluster then this will return them. The same occurs +within the application. This makes the proper configuration of the application's +permissions very important as, without proper restrictions it can impersonate +very powerful `users` or `groups`. For example, the `system:masters` group +is generally bound to the `cluster-admin` role which can do anything. + +For more details of the permissions needed by the user or group see the +[user permissions](user-permissions.mdx) guide. + +### Configuring impersonation + +It is highly recommended that you limit which users and groups that the +application can impersonate by setting `rbac.impersonationResourceNames` in +the Helm chart's `values`. e.g.: + +```yaml +rbac: + impersonationResources: ["groups"] + impersonationResourceNames: + - admin + - dev-team + - qa-team +``` +In this example the application can only impersonate the groups admin, dev-team +and qa-team (this also implicitly disables the static cluster-user). 
+ +Unfortunately not all OIDC providers support groups so you may need to +manually enumerate users, for example: +```yaml +rbac: + impersonationResources: ["users"] + impersonationResourceNames: + - aisha@example.com + - bill@example.com + - wego-admin # enable the static cluster-user +``` + +A better, albeit more involved, solution is to set up an OIDC connector like +[Dex](../guides/setting-up-dex.md) and use that to manage groups for you. + +## Get namespaces + +The application itself uses get namespace permissions to pre-cache the list of +available namespaces. As the user accesses resources their permissions within +various namespaces are also cached to speed up future operations. + +## Reading the cluster-user-auth and oidc-auth secrets + +The cluster-user-auth and oidc-auth secrets provide information for authenticating +to the application. The former holds the username and bcrypt-hashed password +for the static user and the latter holds OIDC configuration. + +The application needs to be able to access these secrets in order to +authenticate users. + +### Configuring secrets + +The `rbac.viewSecretsResourceNames` value allows the operator to change which secrets the +application can read. This is mostly so that, if the static user is not +configured, that secret can be removed; or if the secret to be used is re-named. diff --git a/website/versioned_docs/version-0.12.0/configuration/tls.md b/website/versioned_docs/version-0.12.0/configuration/tls.md new file mode 100644 index 0000000000..3777feba82 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/tls.md @@ -0,0 +1,50 @@ +--- +title: TLS and certificates +sidebar_position: 4 +--- + +## TLS configuration + +By default the dashboard will listen on 0.0.0.0:9001 with TLS disabled and +without exposing any external connection. + +Exposing services without TLS is not recommended. 
Without a certificate, a user +can't be sure they are using the right service, and the traffic will be easily +monitored, or even tampered with. All communication between the user and an endpoint +with TLS will be encrypted. + +To expose an external connection, you must first configure TLS. TLS termination +can be provided via an ingress controller or directly by the dashboard. In +either case, the helm release must be updated. To have the dashboard itself +handle TLS, you must create a `tls` secret containing the cert and key: + +```cli +kubectl create secret tls my-tls-secret \ + --cert=path/to/cert/file \ + --key=path/to/key/file +``` + +and reference it from the helm release: + +```yaml + values: + serverTLS: + enabled: true + secretName: "my-tls-secret" +``` + +If you prefer to delegate TLS handling to the ingress controller instead, your +helm release should look like: + +```yaml + values: + ingress: + enabled: true + ... other parameters specific to the ingress type ... +``` + +## cert-manager + +Install [cert-manager](../guides/cert-manager.md) and request a `Certificate` in +the `flux-system` namespace. Provide the name of secret associated with the +certificate to the weave-gitops-enterprise HelmRelease as described above. diff --git a/website/versioned_docs/version-0.12.0/configuration/user-permissions.mdx b/website/versioned_docs/version-0.12.0/configuration/user-permissions.mdx new file mode 100644 index 0000000000..9986c5d0e2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/configuration/user-permissions.mdx @@ -0,0 +1,95 @@ +--- +title: User permissions +sidebar_position: 3 +--- + +This is an explanation of the [kubernetes permissions](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +needed by users of the Weave GitOps application. 
As covered in +[service account permissions](service-account-permissions.mdx) +the primary way that the application interacts with the Kube API is via [impersonation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation). +This means that the permissions granted to the users and groups that Weave GitOps +can impersonate determine the scope of actions that it can take within your cluster. + +A minimal set of permissions are generated for the static cluster-user as part +of the [helm chart](https://github.com/weaveworks/weave-gitops/tree/main/charts/gitops-server/templates/admin-user-roles.yaml). + +By default both a ClusterRole and Role are generated for the static cluster-user. +Both have the same permissions with the former being optional and the latter being +bound to the `flux-system` namespace (where Flux stores its resources by default). +The default set of rules are configured like this: +```yaml +rules: + # Flux Resources + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: [ "notification.toolkit.fluxcd.io" ] + resources: [ "providers", "alerts" ] + verbs: [ "get", "list", "watch", "patch" ] + + - apiGroups: ["infra.contrib.fluxcd.io"] + resources: ["terraforms"] + verbs: [ "get", "list", "watch", "patch" ] + + # Read access for all other Kubernetes objects + - apiGroups: ["*"] + resources: ["*"] + verbs: [ "get", "list", "watch" ] +``` + + +### Flux Resources + +The resources that Flux works with directly, including the ones from TF-controller. 
+ +| Api Group | Resources | Permissions | +| ------------------------------ | ----------------------------------------------------------------------- | ---------------- | +| kustomize.toolkit.fluxcd.io | kustomizations | get, list, patch | +| helm.toolkit.fluxcd.io | helmreleases | get, list, patch | +| source.toolkit.fluxcd.io | buckets, helmcharts, gitrepositories, helmrepositories, ocirepositories | get, list, patch | +| notification.toolkit.fluxcd.io | providers, alerts | get, list, patch | +| infra.contrib.fluxcd.io | terraforms | get, list, patch | + +In order for Weave GitOps to be able to accurately display the state of Flux it +needs to be able to query the [CRDs](https://fluxcd.io/docs/components/) that Flux uses. This is done using the +`get` and `list` permissions. + +The `patch` permissions are used for 2 features: to suspend and resume +reconciliation of a resource by modifying the 'spec' of a resource, +and to force reconciliation of a resource by modifying the annotations +of the resource. These features work the same way `flux suspend`, +`flux resume` and `flux reconcile` do on the CLI. + +### Resources managed via Flux + +| Api Group | Resources | Permissions | +|---------------------------|--------------------------------------------------------------------------------|------------------| +| "" | configmaps, secrets, pods, services, persistentvolumes, persistentvolumeclaims | get, list, watch | +| apps | deployments, replicasets, statefulsets | get, list, watch | +| batch | jobs, cronjobs | get, list, watch | +| autoscaling | horizontalpodautoscalers | get, list, watch | +| rbac.authorization.k8s.io | roles, clusterroles, rolebindings, clusterrolebindings | get, list, watch | +| networking.k8s.io | ingresses | get, list, watch | + +Weave GitOps reads basic resources so that it can monitor the effect that Flux has +on what's running. 
+ +Reading `secrets` enables Weave GitOps to monitor the state of Helm releases +as that's where it stores the [state by default](https://helm.sh/docs/faq/changes_since_helm2/#secrets-as-the-default-storage-driver). +For clarity, these are the Helm release objects _not_ the Flux HelmRelease +resources (which are dealt with by the earlier section). + +### Feedback from Flux + +The primary method by which Flux communicates the status of itself is by events, +these will show when reconciliations start and stop, whether they're successful +and information as to why they're not. diff --git a/website/versioned_docs/version-0.12.0/enterprise/_category_.json b/website/versioned_docs/version-0.12.0/enterprise/_category_.json new file mode 100644 index 0000000000..4d389fb09e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Enterprise", + "position": 10 +} diff --git a/website/versioned_docs/version-0.12.0/enterprise/intro.md b/website/versioned_docs/version-0.12.0/enterprise/intro.md new file mode 100644 index 0000000000..3157bf5434 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/intro.md @@ -0,0 +1,16 @@ +--- +title: Introduction +sidebar_position: 0 +--- + +## Weave GitOps Enterprise + +Weave GitOps Enterprise (WGE) provides ops teams with an easy way to assess the +health of multiple clusters in a single place. It shows cluster information such as +Kubernetes version and number of nodes and provides details about the GitOps operations +on those clusters, such as Git repositories and recent commits. Additionally, it +aggregates Prometheus alerts to assist with troubleshooting. + +## How to purchase + +Get in touch with sales@weave.works to discuss your needs. 
diff --git a/website/versioned_docs/version-0.12.0/enterprise/multi-tenancy.mdx b/website/versioned_docs/version-0.12.0/enterprise/multi-tenancy.mdx new file mode 100644 index 0000000000..3c4c401a50 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/multi-tenancy.mdx @@ -0,0 +1,132 @@ +--- +title: Multi Tenancy +sidebar_position: 2 +hide_title: true +--- +import TierLabel from "../_components/TierLabel"; + + +# Multi tenancy + +Multi tenancy provides users with the ability to define boundaries to multiple engineering teams working on a single cluster. Through a simple interface it adds permissions to the necessary Kubernetes resources to make it easy for customers to manage their multiple tenants. + +WGE multi tenancy expands on the multi tenancy feature provided by `flux`. In addition to creating the necessary Kubernetes tenancy resources that `flux` adds, multi tenancy in WGE also adds the following: +- Defining tenancy using a single yaml file that serves as a source of truth for the organization +- Makes use of WGE policy features to enforce non Kubernetes native permissions + + +## How it works + +`gitops` command line tool is responsible for creating the multi tenancy resources. The tool is distributed as part of WGE offering. It reads the definitions of a yaml file and can either apply the necessary changes directly to the cluster or output it to stdout so it can be saved into a file and pushed to a repo to be reconciled by `flux`. + +To make use of the policy features, [policy agent](../policy/intro.mdx) needs to be installed in the necessary cluster(s). 
+ +### Tenancy file + +Below is an example of a tenancy file: + +```yaml title="tenancy.yaml" +tenants: + - name: first-tenant + namespaces: + - first-ns + - name: second-tenant + namespaces: + - second-test-ns + - second-dev-ns + allowedRepositories: + - kind: GitRepository + url: https://github.com/testorg/testrepo + - kind: GitRepository + url: https://github.com/testorg/testinfo + - kind: Bucket + url: minio.example.com + - kind: HelmRepository + url: https://testorg.github.io/testrepo + allowedClusters: + - kubeConfig: cluster-1-kubeconfig + - kubeConfig: cluster-2-kubeconfig + teamRBAC: + groupNames: + - foo-group + - bar-group + rules: + - apiGroups: + - '' + resources: + - 'namespaces' + - 'pods' + verbs: + - 'list' + - 'get' + deploymentRBAC: + bindRoles: + - name: foo-role + kind: Role + rules: + - apiGroups: + - '' + resources: + - 'namespaces' + - 'pods' + verbs: + - 'list' + - 'get' +serviceAccount: + name: "reconcilerServiceAccount" +``` + +The file above defines two tenants: `first-tenant` and `second-tenant` as follows: + +- `namespaces`: describes which namespaces should be part of the tenant. Meaning that users who are part of the tenant would have access to those namespaces. +- `allowedRepositories`: limits the `flux` repository sources that can be used in the tenant's namespaces. This is done through policies and thus requires `policy-agent` to be deployed on the cluster which will stop these sources from being deployed if they aren't allowed as part of the tenant. It consists of: + - `kind`: the `flux` source kind. Can be: `GitRepository`, `Bucket` and `HelmRepository`. + - `url`: the URL for that source. +- `allowedClusters`: limits which secrets containing cluster configuration can be used. It stops WGE `GitopsCluster` and flux `Kustomization` from being deployed if they point to a secret not in the list, essentially giving control on which cluster can be added to a multi-cluster setup. Requires `policy-agent`. 
+ - `kubeConfig`: name of the secret that can be used for this tenant. +- `teamRBAC`: Generate Roles and Rolebindings for a list of `groupNames`. This allows you to easily give an OIDC group access to a tenant's resources. When the Weave Gitops Enterprise UI is configured with your OIDC provider, tenants can log in and view the status of the resources they have been granted access to. +- `deploymentRBAC`: generate Roles and Rolebindings for a service account. Can additionally bind to an existing Roles/ClusterRoles. Would use the global service account if specified in the tenants file, otherwise it will use the created service account which takes the tenant name. If not specified a Rolebinding would be created that binds to `cluster-admin` ClusterRole. + +Global options: + +- `serviceAccount`: Override the name of the generated `ServiceAccount` for all tenants. This allows you to easily use the flux controllers' [`--default-service-account`](https://github.com/fluxcd/flux2-multi-tenancy#enforce-tenant-isolation) feature. Tenants do not need to make sure they correctly specify the `serviceAccount` when using `Kustomization` or `HelmRelease` resources. The kustomization-controller and helm-controller will instead look for the `default-service-account` in the namespace being reconciled to and use that. Just configure `serviceAccount.name` and `--default-service-account` to the same value. + +### Gitops create tenants command + +The command creates the necessary resources to apply multi tenancy on the user's cluster. To use the command to apply the resources directly the user needs to have the necessary configuration to connect to the desired cluster. +The command considers the tenancy file as a source of truth and will change the cluster state to match what is currently described in the file. 
+ +For more control on a specific tenant a tenancy file should be used, the command allows the creation of the base resources that defines a tenancy through the arguments: + +```bash +gitops create tenants --name test-tenant --namespace test-ns1 --namespace test-ns2 +``` + +The above will create the namespaces and permissions through a `ServiceAccount` with the same name as the tenant in that case `test-tenant` in each required namespace. +The same can be done through a file as follows: + +```bash +gitops create tenants --from-file tenants.yaml +``` + +To apply the same resources, the file should look like this: + +```yaml +tenants: + - name: test-tenant + namespaces: + - test-ns1 + - test-ns2 +``` + +To check the resources that would be deployed first use the `export` flag: + +```bash +gitops create tenants --from-file tenants.yaml --export +``` + +Applying the resources through the command line is not usually recommended. For WGE the recommended way is to commit the result of the `create tenants` command to source control and let `flux` handle deployment. 
To achieve that you can save the result of the `export` to a file: + +```bash +gitops create tenants --from-file tenants.yaml --export > clusters/management/tenants.yaml +``` diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/_category_.json b/website/versioned_docs/version-0.12.0/enterprise/pipelines/_category_.json new file mode 100644 index 0000000000..cc964ec8a3 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Pipelines", + "position": 3 +} diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/authorization.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/authorization.mdx new file mode 100644 index 0000000000..90939a6215 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/authorization.mdx @@ -0,0 +1,50 @@ +--- +title: Authorization +sidebar_position: 2 +hide_title: true +--- + +import TierLabel from "../../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Authorization + +This section provides a recommended way to configure RBAC in the context of pipelines. It is oriented to the journey +that you expect your users to have. + +## View pipelines + +In order to view pipelines, users would need to have read access to the `pipeline` resource and the underlying `application` resources. + +An example of configuration to achieve this purpose could be seen below with `pipeline-reader` role and `search-pipeline-reader` +role-binding to allow a group `search-developer` to access pipeline resources within `search` namespace. 
+ +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipeline-reader +rules: + - apiGroups: [ "pipelines.weave.works" ] + resources: [ "pipelines" ] + verbs: [ "get", "list", "watch"] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: search-pipeline-reader + namespace: search +subjects: + - kind: Group + name: search-developer + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: pipeline-reader + apiGroup: rbac.authorization.k8s.io +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/getting-started.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/getting-started.mdx new file mode 100644 index 0000000000..dbf68c842c --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/getting-started.mdx @@ -0,0 +1,94 @@ +--- +title: Getting started +sidebar_position: 1 +hide_title: true +--- + +import TierLabel from "../../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Getting started with pipelines + +## Requirements +:::note Feature toggle +The Pipelines feature can be enabled or disabled by setting the `enablePipelines` +flag in the values for the Weave GitOps Enterprise Helm Chart. +::: + +- You have configured Weave GitOps Enterprise [RBAC for pipelines](../authorization) + +## Define a pipeline + +A pipeline allows you to define the route your application is taking in order to make it to production. 
+There are three main concepts playing in a pipeline: +- the `application` to deliver +- the `environments` that your app will go through in its way to production (general) +- the `deployment targets` or the clusters that each environment has + +You can define a delivery pipeline using a `Pipeline` custom resource. +An example of how it looks for an application `podinfo` is shown below. + +```yaml +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: podinfo-02 + namespace: default +spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: podinfo + environments: + - name: dev + targets: + - namespace: podinfo-02-dev + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: test + targets: + - namespace: podinfo-02-qa + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - namespace: podinfo-02-perf + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: prod + targets: + - namespace: podinfo-02-prod + clusterRef: + kind: GitopsCluster + name: prod + namespace: flux-system +``` + +In the previous example, `podinfo` application is delivered to a traditional pipeline composed by `dev`, `qa`, `perf` and `production` environments. +Each environment is backed by a `GitopsCluster` [deployment target](../../../cluster-management/managing-existing-clusters/). + +For more details about the spec of a pipeline [see here](spec/v1alpha1/pipeline.mdx) + +## View the list of pipelines + +Once flux has created your pipeline you can navigate to the pipelines view to see it. + +![view pipelines](img/view-pipelines.png) + +Pipeline list view show the list of pipelines you have access to. For each pipeline, a simplified view of the pipeline +is shown with the application `Type` and `Environments` it goes through. + +## View the details of a pipeline + +Once you have selected a pipeline from the list, you will navigate to its details view. 
+In pipeline details view you could view the current status of your application by environment and deployment +target. + +![view pipeline details](img/view-pipeline-details.png) + diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-table-create.png b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-table-create.png new file mode 100644 index 0000000000..d4e4061b5a Binary files /dev/null and b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-table-create.png differ diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-templates.png b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-templates.png new file mode 100644 index 0000000000..52dac5871e Binary files /dev/null and b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/pipelines-templates.png differ diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/promotion-pr.png b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/promotion-pr.png new file mode 100644 index 0000000000..74f7733618 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/promotion-pr.png differ diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipeline-details.png b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipeline-details.png new file mode 100644 index 0000000000..cc03fad855 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipeline-details.png differ diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipelines.png b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipelines.png new file mode 100644 index 0000000000..c043642a3c Binary files /dev/null and b/website/versioned_docs/version-0.12.0/enterprise/pipelines/img/view-pipelines.png differ diff --git 
a/website/versioned_docs/version-0.12.0/enterprise/pipelines/intro.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/intro.mdx new file mode 100644 index 0000000000..589341038d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/intro.mdx @@ -0,0 +1,32 @@ +--- +title: Introduction +sidebar_position: 0 +hide_title: true +--- + +import TierLabel from "../../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +## Pipelines + +As [wikipedia defines](https://en.wikipedia.org/wiki/Continuous_delivery), Continuous delivery is + +>a software engineering approach in which teams produce software in short cycles, +>ensuring that the software can be reliably released at any time and, when releasing the software, without doing so manually. +>It aims at building, testing, and releasing software with greater speed and frequency. + +>Continuous delivery is enabled through the deployment pipeline. +>The purpose of the deployment pipeline has three components: visibility, feedback, and continually deploy.[12] + +Weave GitOps Enterprise Pipelines allows you to define your deployment pipelines to enable continuous delivery for +your gitops applications. + +As part of Weave GitOps Enterprise, you can: + +- [Define a delivery pipeline](../getting-started/#define-a-pipeline) that you want your application to follow +- [View a pipeline](../getting-started/#view-pipeline-list) and check the status of your deployments. + +Now that you know what delivery pipelines can do for you, follow the [guide to get started](../getting-started). 
\ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/pipeline-templates.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/pipeline-templates.mdx new file mode 100644 index 0000000000..de23e63105 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/pipeline-templates.mdx @@ -0,0 +1,281 @@ +--- +title: Using GitOpsTemplates for Pipelines +sidebar_position: 4 +hide_title: true +--- + +import TierLabel from "../../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Using GitOpsTemplates for Pipelines + +To create new Pipelines and their required resources from within Weave GitOps Enterprise, you can leverage [GitOpsTemplates](../../gitops-templates/templates.mdx) to help platform teams scale for developer self-service. + +This document will provide example configuration which could be adapted for use within your own organization, based on your [tenancy model](https://kubernetes.io/blog/2021/04/15/three-tenancy-models-for-kubernetes/) of choice. + +We will cover the creation of: +- Pipelines +- Alerts +- Providers + +Secrets, required for authentication and authorization between leaf and management clusters as well as to Git, are out of scope for this document and would need to be handled by your secret management solution of choice. + +For advice on Secrets Management, you can refer to the Flux guide [here](https://fluxcd.io/flux/security/secrets-management/) or contact [Weaveworks](mailto:sales@weave.works) for assistance. + +Templates can include a single resource or multiple resources depending on your use case, for example - you may want to only create the Pipeline custom resource to associate existing HelmReleases, or you could create the HelmReleases, notification controller resources, and Pipeline all in a single template. They are highly customizable to suit your teams' needs. 
+ +## Adding new resources from within the Weave GitOps Enterprise dashboard +GitOpsTemplates are custom resources installed onto the management cluster where Weave GitOps Enterprise resides. To add a new Pipeline, click `Create a Pipeline` from within the Pipeline view, which will take you to a pre-filtered list of templates with the label: `weave.works/template-type: pipeline`. + +![Create Pipeline button in Pipeline view](img/pipelines-table-create.png) + + The `Templates` view (shown below) lists all templates for which a given user has the appropriate permission to view. You can install GitOpsTemplates into different namespaces, and apply standard kubernetes RBAC to limit which teams can utilize which templates. You could additionally configure [Policy](../../policy/intro.mdx) to enforce permitted values within a template. + +![Templates view showing Pipeline templates](img/pipelines-templates.png) + +## Example GitOpsTemplates + +This section provides examples to help you build your own templates for Pipelines. + +### Pipeline - Visualization only + +:::note Included Sample +This template is shipped by default with Weave GitOps Enterprise to help you get started with Pipelines. +::: + +For flexibility, this allows the user of the template to specify the names of the Clusters where the application is deployed, and to vary the namespace per cluster. This means it would even work in a tenancy model where environments co-exist on the same cluster and use namespaces for isolation. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsTemplate +metadata: + name: pipeline-sample + namespace: default # Namespace where the GitOpsTemplate is installed, consider that a team will need READ access to this namespace and the custom resource + labels: + weave.works/template-type: pipeline +spec: + description: Sample Pipeline showing visualization of two helm releases across two environments. 
+ params: + - name: RESOURCE_NAME # This is a required parameter name to enable Weave GitOps to write to your Git Repository + description: Name of the Pipeline + - name: RESOURCE_NAMESPACE + description: Namespace for the Pipeline on the management cluster + default: flux-system # default values make it easier for users to fill in a template + - name: FIRST_CLUSTER_NAME + description: Name of GitopsCluster object for the first environment + - name: FIRST_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: FIRST_APPLICATION_NAME + description: Name of the HelmRelease for your application in the first environment + - name: FIRST_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: SECOND_CLUSTER_NAME + description: Name of GitopsCluster object for the second environment + - name: SECOND_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: SECOND_APPLICATION_NAME + description: Name of the HelmRelease for your application in the second environment + - name: SECOND_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + resourcetemplates: + - apiVersion: pipelines.weave.works/v1alpha1 + kind: Pipeline + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: ${APPLICATION_NAME} + environments: + - name: First-Environment + targets: + - namespace: ${FIRST_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${FIRST_CLUSTER_NAME} + namespace: ${FIRST_CLUSTER_NAMESPACE} + - name: Second-Environment + targets: + - namespace: ${SECOND_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${SECOND_CLUSTER_NAME} + namespace: ${SECOND_CLUSTER_NAMESPACE} +``` + +### Pipeline - Multi-cluster promotion + +This example extends the above to add a promotion strategy, in this case 
it will raise a pull request to update the application version in subsequent environments. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsTemplate +metadata: + name: pipeline-sample + namespace: default + labels: + weave.works/template-type: pipeline +spec: + description: Sample Pipeline showing visualization of two helm releases across two environments. + params: + - name: RESOURCE_NAME + description: Name of the Pipeline + - name: RESOURCE_NAMESPACE + description: Namespace for the Pipeline on the management cluster + default: flux-system + - name: FIRST_CLUSTER_NAME + description: Name of GitopsCluster object for the first environment + - name: FIRST_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: FIRST_APPLICATION_NAME + description: Name of the HelmRelease for your application in the first environment + - name: FIRST_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: SECOND_CLUSTER_NAME + description: Name of GitopsCluster object for the second environment + - name: SECOND_CLUSTER_NAMESPACE + description: Namespace where this object exists + default: default + - name: SECOND_APPLICATION_NAME + description: Name of the HelmRelease for your application in the second environment + - name: SECOND_APPLICATION_NAMESPACE + description: Namespace for this application + default: flux-system + - name: APPLICATION_REPO_URL + description: URL for the git repository containing the HelmRelease objects + - name: APPLICATION_REPO_BRANCH + description: Branch to update with new version + - name: GIT_CREDENTIALS_SECRET + description: Name of the secret in RESOURCE_NAMESPACE containing credentials to create pull requests + resourcetemplates: + - apiVersion: pipelines.weave.works/v1alpha1 + kind: Pipeline + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + 
name: ${APPLICATION_NAME} + environments: + - name: First-Environment + targets: + - namespace: ${FIRST_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${FIRST_CLUSTER_NAME} + namespace: ${FIRST_CLUSTER_NAMESPACE} + - name: Second-Environment + targets: + - namespace: ${SECOND_APPLICATION_NAMESPACE} + clusterRef: + kind: GitopsCluster + name: ${SECOND_CLUSTER_NAME} + namespace: ${SECOND_CLUSTER_NAMESPACE} + promotion: + pull-request: + url: ${APPLICATION_REPO_URL} + branch: ${APPLICATION_REPO_BRANCH} + secretRef: + name: ${GIT_CREDENTIALS_SECRET} +``` + +#### Git credentials +For guidance on configuring credentials - see instructions in the [Promoting Applications](../promoting-applications#create-credentials-secret) documentation. + +#### Promotion marker to be added to HelmRelease in `Second-Environment` +A comment would need to be added to the HelmRelease or Kustomization patch where the `spec.chart.spec.version` is defined. + +For example, if the values used in the above template were as follows: + +```yaml +RESOURCE_NAME=my-app +RESOURCE_NAMESPACE=pipeline-01 +``` + +Then the marker would be: + +```yaml +# {"$promotion": "pipeline-01:my-app:Second-Environment"} +``` + +More guidance on adding markers can be found [here](../promoting-applications#add-markers-to-app-manifests). + +### Alerts and Providers +This example shows how you can configure multiple resources in a single template, and simplify creation through common naming strategies. The notification controller is used to communicate update events from the leaf clusters where Applications are deployed, to the management cluster where the Pipeline Controller resides and orchestrates. + +For the `Alert`, this template is filtering events to detect when an update has occurred. Depending on your use case, you could use different filtering. 
+ +For the `Provider`, this template uses authenticated (HMAC) communication to the promotion endpoint, where a secret will need to be present on both the management cluster and leaf cluster(s). For simplicity, a `generic` provider could be used instead - which would not require the secret. + +```yaml +apiVersion: templates.weave.works/v1alpha1 +kind: GitOpsTemplate +metadata: + name: pipeline-notification-resources + namespace: default + labels: + weave.works/template-type: application # These are generic Flux resources rather than Pipeline-specific +spec: + description: Creates flux notification controller resources for a cluster, required for promoting applications via pipelines. + params: + - name: RESOURCE_NAME + description: Name for the generated objects, should match the target Application (HelmRelease) name. + - name: RESOURCE_NAMESPACE + description: Namespace for the generated objects, should match the target Application (HelmRelease) namespace. + - name: PROMOTION_HOST + description: Host for the promotion webhook on the management cluster, i.e. "promotions.example.org" + - name: SECRET_REF + description: Name of the secret containing HMAC key in the token field + - name: ENV_NAME + description: Environment the cluster is a part of within a pipeline. 
+ resourcetemplates: + - apiVersion: notification.toolkit.fluxcd.io/v1beta1 + kind: Provider + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + address: http://${PROMOTION_HOST}/promotion/${APP_NAME}/${ENV_NAME} + type: generic-hmac + secretRef: ${SECRET_REF} + - apiVersion: notification.toolkit.fluxcd.io/v1beta1 + kind: Alert + metadata: + name: ${RESOURCE_NAME} + namespace: ${RESOURCE_NAMESPACE} + spec: + providerRef: + name: ${RESOURCE_NAME} + eventSeverity: info + eventSources: + - kind: HelmRelease + name: ${RESOURCE_NAME} + exclusionList: + - ".*upgrade.*has.*started" + - ".*is.*not.*ready" + - "^Dependencies.*" +``` + +## Summary +GitOpsTemplates provide a highly flexible way for platform and application teams to work together with Pipelines. + +You can hard-code values, offer a range of accepted values, or leave open to the consumer of the template to provide input based on your organization's requirements. + +Templates are subject to RBAC as with any Kubernetes resource, enabling you to easily control which tenants have access to which templates. + +For full details on GitOpsTemplates, be sure to read our [documentation](../../gitops-templates/templates.mdx). + + + diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/promoting-applications.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/promoting-applications.mdx new file mode 100644 index 0000000000..25a4809d9d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/promoting-applications.mdx @@ -0,0 +1,252 @@ +--- +title: Promoting applications +sidebar_position: 3 +hide_title: true +--- + +import TierLabel from "../../_components/TierLabel"; + +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Promoting applications through pipeline environments + +Pipelines allow you to configure automatic promotions of applications through a consecutive set of environments, e.g. 
from dev to staging to production. The environments are defined in the `Pipeline` resource itself so that each pipeline governs a single application and all the environments to which it is deployed. + +:::note +At the moment only applications defined as Flux `HelmReleases` are supported in automatic promotions. +::: + +
+ +![an example promotion PR](img/promotion-pr.png) + +
An example of a pull request for an application promotion
+
+ +The [Getting started guide](../getting-started) describes how to create a basic pipeline for an application so you can visualize its deployments across a series of environments. You may also configure a pipeline in order to promote applications across a series of environments. +There are currently two supported strategies for application promotions: +- Pull request strategy: this strategy is used for applications that are delivered via Flux to all environments of a pipeline. Typically, the versions of these applications are stored in Git and therefore pull requests can be used to update them as part of a promotion. +- Notification strategy: this strategy is used when an external CI system is responsible for promoting an application across the environments of a pipeline. In this strategy, the notification controller running on the management cluster is used to forward notifications of successful promotions to external CI systems. + +Before configuring any of the above promotion strategies, you need to set up notifications from all your environments so that whenever a new version gets deployed, the promotion webhook component of the pipeline controller is notified and takes an action based on the pipeline definition. The rest of this guide describes the configuration needed to set up application promotion via pipelines. + +## Expose the promotion webhook + +Applications deployed in leaf clusters use the Flux notification controller running on each leaf cluster, to notify the management cluster of a successful promotion. This requires network connectivity to be established between the leaf cluster and the management cluster. + +The component responsible for listening to incoming notifications from leaf clusters is the pipeline controller. It hosts a webhook service that needs to be exposed via an ingress resource to make it available for external calls. 
Exposing the webhook service is done via the Weave GitOps Enterprise Helm chart values and the configuration used depends on your environment. The example below shows the configuration for NGINX ingress controller and needs to be adjusted if another ingress controller is used: + +```yaml +spec: + values: + enablePipelines: true + pipeline-controller: + promotion: + ingress: + enabled: true + className: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt + hosts: + - host: promotions.example.org + paths: + - path: /?(.*) + pathType: ImplementationSpecific + tls: + - secretName: promotions-tls + hosts: + - promotions.example.org +``` + +You will need the externally reachable URL of this service later on in this guide. + +## Setup notifications from leaf clusters + +Once the webhook service is exposed over HTTP/S, you need to create alert/provider resources to send notifications to it from leaf clusters. These notifications represent successful promotions for applications running on the leaf clusters. + +Successful promotion events are triggered by Flux's [notification controller](https://fluxcd.io/flux/components/notification/). You create a Provider pointing to the promotion webhook exposed earlier and an Alert targeting the app's HelmRelease: + +```yaml +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Provider +metadata: + name: promotion-my-app +spec: + address: "https://promotions.example.org/promotion/pipeline-01/my-app/dev" + type: generic-hmac + secretRef: + name: hmac-secret +``` + +In the example above, the `generic-hmac` Provider is used to ensure notifications originate from authenticated sources. The referenced Secret, should include a `token` field which holds the HMAC key. The same HMAC key must be specified in the Secret referenced by the `.spec.promotion.secretRef.name` field, so that the pipeline controller can verify any incoming notifications. 
For more information on the `generic-hmac` Provider, please refer to the notification controller [docs](https://fluxcd.io/flux/components/notification/provider/#generic-webhook-with-hmac). + +Note that by default, the promotion webhook endpoint is exposed at `/promotion` as shown in the example above. However you may use rewrite rules in your ingress configuration to omit it, if desired. For example, if using NGINX ingress controller, you may use the following annotation: +```yaml +annotations: + nginx.ingress.kubernetes.io/rewrite-target: /promotion/$1 +``` +The Provider address can then be set as `https://promotions.example.org/pipeline-01/my-app/dev`. + +:::tip +You may also use the [generic webhook provider type that supports HMAC verification](https://fluxcd.io/flux/components/notification/provider/#generic-webhook-with-hmac) to ensure incoming notifications originate from authenticated sources. +::: + +The `address` field's URL path is comprised of 3 components again: + +1. The namespace of the app's pipeline. +1. The name of the pipeline resource. +1. The origin environment's name. This is the name of the environment that the event is created in, e.g. "dev" for events coming from the "dev" environment. + +Weave GitOps Enterprise can then parse the incoming URL path to identify the pipeline resource and look up the next environment for the defined promotion action. + +An example Alert might look like this: + +```yaml + apiVersion: notification.toolkit.fluxcd.io/v1beta1 + kind: Alert + [...] + spec: + eventSeverity: info + eventSources: + - kind: HelmRelease + name: my-app + exclusionList: + - .*upgrade.*has.*started + - .*is.*not.*ready + - ^Dependencies.* + providerRef: + name: promotion-my-app +``` + +:::note +Be aware to create the Provider/Alert tuple on each of the leaf clusters targeted by a pipeline. +::: + +Now as soon as the `HelmRelease` on the first environment defined in the pipeline is bumped (e.g. 
by Flux discovering a new version in the Helm repository), an event is sent to the promotion webhook which will determine the next action based on the pipeline definition and chosen strategy. The rest of this guide describes how to setup up any of the available strategies depending on your requirements. + +## Pull request + +This section covers adding a promotion by pull request (PR) strategy, so that whenever the application defined in a pipeline is upgraded in one of the pipeline's environments, a GitHub PR is created that updates the manifest file setting the application version in the next environment. + +The dynamic nature of GitOps deployments requires you to assist Weave GitOps a little with information on which repository hosts the manifest files, how to authenticate with the repository and the GitHub API, and which file hosts the version definition for each environment. + +### Create credentials secret + +In order to authenticate with the GitHub repository hosting the manifests, you need to create a Secret with credentials used for cloning. The contents of this secret are the same you use for [GitRepositories](https://fluxcd.io/flux/components/source/gitrepositories/#secret-reference). In addition to these you need to include a `token` field containing a GitHub access token that is used to authenticate with the GitHub API when creating a pull request. Here is an example Secret: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: promotion-credentials + namespace: pipeline-01 +data: + username: ZXhhbXBsZQ== + password: ZXhhbXBsZS1wYXNzd29yZA== + token: Z2hwX01IL3RsTFpXTXZMY0FxVWRYY1ZGL0lGbzh0WDdHNjdsZmRxWQ== + hmac-key: OEIzMTNBNjQ0REU0OEVGODgxMTJCQ0VFNTQ3NkE= +type: Opaque +``` + +:::note +The GitHub token provided in the `token` field needs to be given permission to create pull requests in the pipeline's repository (defined in `.spec.promotion.pull-request.url`). 
The `hmac-key` field must match the key used for the Provider resource (`.spec.secretRef`), if specified in the leaf clusters.
+ +Weave GitOps Enterprise will look for this marker whenever it receives an event from the respective HelmRelease of one of the leaf clusters and patch the file with the version denoted in the event (see the section above for instructions on setting up notification events from leaf clusters). Finally, it will create a GitHub PR to update the version of the application for the next environment in the pipeline. + +## Notification + +This section explains how to configure pipelines to work with external CI systems that are responsible for application promotions. + +This strategy uses the notification controller running on the management cluster, to forward any notifications received by the promotion webhook, from leaf clusters to external CI systems. This requires to [patch](https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-notifications-for-third-party-controllers) the Flux manifests of the management cluster, in order to allow objects of type `Pipeline` to be used as event sources. An example of a patch applied to enable this is shown below: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- gotk-components.yaml +- gotk-sync.yaml +patches: +- patch: | + - op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/- + value: Pipeline + target: + kind: CustomResourceDefinition + name: alerts.notification.toolkit.fluxcd.io +``` + +You can now create Provider/Alert resources on the management cluster to forward notifications to external systems. 
For example, the Provider resource shown below is used to invoke a GitHub Actions workflow on a repository: +```yaml +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Provider +metadata: + name: promotion-my-app-via-github-actions +spec: + type: githubdispatch + address: https://github.com/my-org/my-app-repo + secretRef: + name: github-credentials +``` + +To use this Provider, add an Alert that uses the pipeline resource defined on the management cluster as an event source. An example of such an Alert is shown below: + +```yaml +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Alert +metadata: + name: promotion-my-app-via-github-actions +spec: + eventSeverity: info + eventSources: + - kind: Pipeline + name: my-app + namespace: my-app-ns + providerRef: + name: promotion-my-app-via-github-actions +``` + +The notification controller running on the management cluster is now configured to forward any promotion notifications received from leaf clusters. To actually use this strategy from a pipeline, set the promotion field as shown below: + +```yaml +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: my-app + namespace: my-app-ns +spec: +[...] + promotion: + notification: {} +``` + +Promotion notifications from leaf clusters should now be forwarded via the notification controller running on the management cluster and should include information about the version of the application being promoted. 
\ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/_category_.json b/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/_category_.json new file mode 100644 index 0000000000..9b5c74a020 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Reference", + "position": 4 +} diff --git a/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/v1alpha1/pipeline.mdx b/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/v1alpha1/pipeline.mdx new file mode 100644 index 0000000000..7127e23d6a --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/pipelines/spec/v1alpha1/pipeline.mdx @@ -0,0 +1,213 @@ +--- +title: Pipeline +sidebar_position: 1 +hide_title: true +--- +# Pipeline + +The Pipeline API defines a resource for continuous delivery pipelines. + +An example of a fully defined pipeline that creates pull requests for application promotions is shown below. + +```yaml +apiVersion: pipelines.weave.works/v1alpha1 +kind: Pipeline +metadata: + name: podinfo-02 + namespace: default +spec: + appRef: + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: podinfo + environments: + - name: dev + targets: + - namespace: podinfo-02-dev + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: test + targets: + - namespace: podinfo-02-qa + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - namespace: podinfo-02-perf + clusterRef: + kind: GitopsCluster + name: dev + namespace: flux-system + - name: prod + targets: + - namespace: podinfo-02-prod + clusterRef: + kind: GitopsCluster + name: prod + namespace: flux-system + promotion: + pull-request: + url: https://github.com/my-org/my-app-repo + secretRef: + name: github-credentials +``` + +## Specification + +The documentation for version `v1alpha1` of a `Pipeline` resource is found next. 
+ +### Pipeline + + +```go +// Pipeline is the Schema for the pipelines API +type Pipeline struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PipelineSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status PipelineStatus `json:"status,omitempty"` +} + +type PipelineSpec struct { + // Environments is a list of environments to which the pipeline's application is supposed to be deployed. + // +required + Environments []Environment `json:"environments"` + // AppRef denotes the name and type of the application that's governed by the pipeline. + // +required + AppRef LocalAppReference `json:"appRef"` + // Promotion defines details about how promotions are carried out between the environments + // of this pipeline. + // +optional + Promotion *Promotion `json:"promotion,omitempty"` +} + +type Environment struct { + // Name defines the name of this environment. This is commonly something such as "dev" or "prod". + // +required + Name string `json:"name"` + // Targets is a list of targets that are part of this environment. Each environment should have + // at least one target. + // +required + Targets []Target `json:"targets"` +} + +type Target struct { + // Namespace denotes the namespace of this target on the referenced cluster. This is where + // the app pointed to by the environment's `appRef` is searched. + // +required + Namespace string `json:"namespace"` + // ClusterRef points to the cluster that's targeted by this target. If this field is not set, then the target is assumed + // to point to a Namespace on the cluster that the Pipeline resources resides on (i.e. a local target). + // +optional + ClusterRef *CrossNamespaceClusterReference `json:"clusterRef,omitempty"` +} + +// Promotion defines all the available promotion strategies. All of the fields in here are mutually exclusive, i.e. you can only select one +// promotion strategy per Pipeline. 
Failure to do so will result in undefined behaviour. +type Promotion struct { + // PullRequest defines a promotion through a GitHub Pull Request. + // +optional + PullRequest *PullRequestPromotion `json:"pull-request,omitempty"` + // Notification defines a promotion where an event is emitted through Flux's notification-controller each time an app is to be promoted. + // +optional + Notification *NotificationPromotion `json:"notification,omitempty"` + // SecrefRef reference the secret that contains a 'hmac-key' field with HMAC key used to authenticate webhook calls. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` +} + +type PullRequestPromotion struct { + // The git repository URL used to patch the manifests for promotion. + // +required + URL string `json:"url"` + // The branch to checkout after cloning. Note: This is just the base + // branch and does not denote the branch used to create a PR from. The + // latter is generated automatically and cannot be provided. If not specified + // the default "main" is used. + // +optional + Branch string `json:"branch"` + // SecretRef specifies the Secret containing authentication credentials for + // the git repository and for the GitHub API. + // For HTTPS repositories the Secret must contain 'username' and 'password' + // fields. + // For SSH repositories the Secret must contain 'identity' + // and 'known_hosts' fields. + // For the GitHub API the Secret must contain a 'token' field. + // +required + SecretRef meta.LocalObjectReference `json:"secretRef"` +} + +type NotificationPromotion struct{} + +``` + +### References + +```go +// LocalAppReference is used together with a Target to find a single instance of an application on a certain cluster. +type LocalAppReference struct { + // API version of the referent. + // +required + APIVersion string `json:"apiVersion"` + + // Kind of the referent. + // +required + Kind string `json:"kind"` + + // Name of the referent. 
+ // +required + Name string `json:"name"` +} + +// CrossNamespaceClusterReference contains enough information to let you locate the +// typed Kubernetes resource object at cluster level. +type CrossNamespaceClusterReference struct { + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent. + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` + + // Namespace of the referent, defaults to the namespace of the Kubernetes resource object that contains the reference. + // +optional + Namespace string `json:"namespace,omitempty"` +} +``` + +### Status + +```go +type PipelineStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Pipeline. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} +``` + +#### Condition Reasons +```go +// Reasons are provided as utility, and are not part of the declarative API. +const ( + // TargetClusterNotFoundReason signals a failure to locate a cluster resource on the management cluster. + TargetClusterNotFoundReason string = "TargetClusterNotFound" + // TargetClusterNotReadyReason signals that a cluster pointed to by a Pipeline is not ready. + TargetClusterNotReadyReason string = "TargetClusterNotReady" + // ReconciliationSucceededReason signals that a Pipeline has been successfully reconciled. 
+ ReconciliationSucceededReason string = "ReconciliationSucceeded" +) +``` + diff --git a/website/versioned_docs/version-0.12.0/enterprise/releases.mdx b/website/versioned_docs/version-0.12.0/enterprise/releases.mdx new file mode 100644 index 0000000000..05d1931cb1 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/releases.mdx @@ -0,0 +1,182 @@ +--- +title: Releases +sidebar_position: 4 +hide_title: true +--- +import TierLabel from "../_components/TierLabel"; + +# Releases + +## v0.11.0 +2022-11-25 + +### Highlights + +#### GitOpsTemplates +- We are working towards unifying CAPI and GitOps Templates under a single umbrella. For those already using CAPITemplates, we will ensure a smooth transition is possible by making use of a conversion hooks. There are some breaking changes for GitOpsTemplates as part of this transitionary period, so be sure to check the guidance under [Breaking Changes](#breaking-changes). +- We now retain the ordering of parameters in the template instead of sorting them alphabetically. Providing to the author control in what sequence the parameters are rendered in the form and thus present a more logically grouped set of parameters to the end consumer. +- You can control what [delimiters](../../gitops-templates/templates/#custom-delimiters-for-rendertype-templating) you want to use in a template. This provides flexibility for if you want to use the syntax for dynamic functions like the [helper functions](../../gitops-templates/templates/#supported-functions-from-sprig-library) we support. + +#### Pipelines +- This [feature](pipelines/intro.mdx) is now enabled by default when you install the Weave GitOps Enterprise Helm Chart. You can toggle this with the `enablePipelines` flag. +- GitOpsTemplates are a highly flexible way to create new resources - including Pipelines. We now provide a shortcut on the Pipelines table view to navigate you to Templates with the `weave.works/template-type=pipeline` label. 
+ +#### Telemetry +This release incorporates anonymous aggregate user behavior analytics to help us continuously improve the product. As an Enterprise customer, this is enabled by default. You can learn more about this [here](../../feedback-and-telemetry/#anonymous-aggregate-user-behavior-analytics). + +### Dependency versions +- weave-gitops v0.11.0 +- cluster-controller v1.4.1 +- cluster-bootstrap-controller v0.3.0 +- (optional) pipeline-controller v0.0.11 +- (optional) policy-agent 2.1.1 + +### Breaking changes + +#### GitOpsTemplates and CAPITemplates +We are making these changes to provide a unified and intuitive self-service experience within Weave GitOps Enterprise, removing misleading and potentially confusing terminology born from when only Clusters were backed by Templates. + +**New API Group for the GitOpsTemplate CRD** +- old: `clustertemplates.weave.works` +- new: `templates.weave.works` + +After upgrading Weave GitOps Enterprise which includes the updated CRD: +1. Update all your GitOpsTemplates in Git changing all occurrences of `apiVersion: clustertemplates.weave.works/v1alpha1` to `apiVersion: templates.weave.works/v1alpha1`. +2. Commit, push and reconcile. They should now be viewable in the Templates view again. +3. Clean up the old CRD. As it stands: + - `kubectl get gitopstemplate -A` will be empty as it is pointing to the old `clustertemplates.weave.works` CRD. 
+ - `kubectl get gitopstemplate.templates.weave.works -A` will work +To fix the former of the commands, remove the old CRD (helm does not do this automatically for safety reasons): + - `kubectl delete crd gitopstemplates.clustertemplates.weave.works` + - You may have to wait up to 5 minutes for your local kubectl CRD cache to invalidate, then `kubectl get gitopstemplate -A` should be working as usual + +**Template Profiles / Applications / Credentials sections are hidden by default** + +For both `CAPITemplates` and `GitopsTemplates` the default visibility for all sections in a template has been set to `"false"`. To re-enable profiles or applications on a template you can tweak the annotations + +```yaml +annotations: + templates.weave.works/profiles-enabled: "true" # enable profiles + templates.weave.works/kustomizations-enabled: "true" # enable applications + templates.weave.works/credentials-enabled: "true" # enable CAPI credentials +``` + +**The default values for a profile are not fetched and included in a pull-request** + +Prior to this release WGE would fetch the default values.yaml for every profile installed and include them in the `HelmReleases` in the Pull Request when rendering out the profiles of a template. + +This was an expensive operation and occasionally led to timeouts. + +The new behaviour is to omit the values and fall back to the defaults included in the helm-chart. This sacrifices some UX (being able to see all the defaults in the PR and tweak them) to improve performance. **There should not be any final behaviour changes to the installed charts**. + +You can still view and tweak the `values.yaml` when selecting profiles to include on the "Create resource (cluster)" page. If changes are made here the updated values.yaml will be included. + +## v0.10.2 +2022-11-15 + +### Highlights +- Retain template parameter ordering. +- Allow configuration of the delimiters in templates. +- Add create a pipeline button. 
Add missing support for policy version v2beta2 to tenancy cmd.
+ - Support for `OCIRepository` sources when restricting/allowing the sources that can be applied into tenants' namespaces. +- **Templates** + - Templates now support helm functions for simple transformations of values: `{{ .params.CLUSTER_NAME | upper }}` + - Templates has moved to its own page in the UI, this is the first step in moving towards embracing them as a more generic feature, not just for cluster creation. + - If a version is not specified in a **template profile annotation** it can be selected by the user. + - A `namespace` can be specified in the **template profile annotation** that will be provided as the `HelmRelease`'s `targetNamespace` by default. +- **Bootstrapping** + - A ClusterBootstrapConfig can now optionally be triggered when `phase="Provisioned"`, rather than `ControlPlaneReady=True` status. + +### Dependency versions + +- weave-gitops v0.9.5 +- cluster-controller v1.3.2 +- cluster-bootstrap-controller v0.3.0 +- (optional) policy-agent 1.1.0 + +### Known issues + +- [UI] Notifications page shows a 404 instead of the notification-controller's configuration. + +### ⚠️ Breaking changes from v0.9.4 + +If using the policy-agent included in the weave-gitops-enterprise helm chart, the configuration should now be placed under the `config` key. 
+ +**old** +```yaml +policy-agent: + enabled: true + accountId: "my-account" + clusterId: "my-cluster" +``` + +**new** +```yaml +policy-agent: + enabled: true + config: + accountId: "my-account" + clusterId: "my-cluster" +``` + + diff --git a/website/versioned_docs/version-0.12.0/enterprise/user-permissions.mdx b/website/versioned_docs/version-0.12.0/enterprise/user-permissions.mdx new file mode 100644 index 0000000000..8f08a34273 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/enterprise/user-permissions.mdx @@ -0,0 +1,150 @@ +--- +title: User Permissions +sidebar_position: 1 +hide_title: true +--- +import TierLabel from "../_components/TierLabel"; + +# User permissions + +Weave GitOps Enterprise extends Weave GitOps [permissions](../configuration/user-permissions.mdx) by adding more roles. These roles may need to be extended further in order to support certain use cases. Some of the most common use cases are described below. + +## Progressive delivery with Flagger + +Weave GitOps Enterprise integrates with Flagger in order to provide a view on progressive delivery deployments. This includes the ability to view all the resources that Flagger manages during its operation. The default ClusterRole `gitops-canaries-reader` includes the minimum permissions necessary for a user to be able to view canary object details, metric template object details and canary related events. + +When Flagger is configured to integrate with a service mesh such as Linkerd or Istio for the rollout, then this ClusterRole needs to be extended so that it can read the additional service mesh resources being generated by Flagger. Note that currently, in order to display service mesh or ingress related resources, we require `spec.provider` to be set in each canary resource. 
+ +The following table provides a list of all the custom resources that Flagger generates grouped by provider: + +| Provider | API Group | Resource | +| --- | --- | --- | +| AppMesh | appmesh.k8s.aws | virtualnode | +| | appmesh.k8s.aws | virtualrouter | +| | appmesh.k8s.aws | virtualservice | +| Linkerd | split.smi-spec.io | trafficsplit | +| Istio | networking.istio.io | destinationrule | +| | networking.istio.io | virtualservice | +| Contour | projectcontour.io | httpproxy | +| Gloo | gateway.solo.io | routetable | +| | gloo.solo.io | upstream | +| Nginx | networking.k8s.io | ingress | +| Skipper | networking.k8s.io | ingress | +| Traefik | traefik.containo.us | traefikservice | +| Open Service Mesh | split.smi-spec.io | trafficsplit | +| Kuma | kuma.io | trafficroute | +| GatewayAPI | gateway.networking.k8s.io | httproute | + +For example, the following manifest shows how `gitops-canaries-reader` has been extended to allow the user for viewing TrafficSplit resources when Linkerd is used: + +```yaml title="gitops-canaries-reader.yaml" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gitops-canaries-reader +rules: +- apiGroups: + - flagger.app + resources: + - canaries + - metrictemplates + verbs: + - get + - list +- apiGroups: + - "" + resources: + - events + verbs: + - get + - watch + - list +# Additional permissions for Linkerd resources are added below +- apiGroups: + - split.smi-spec.io + resources: + - trafficsplits + verbs: + - get + - list +``` + +### Setting up remote cluster permissions + +In order to view canaries in a remote cluster from the management cluster, you need to consider the following: +- The service account used to access the remote cluster needs to be able to list namespaces and custom resource definitions in the given cluster. It additionally needs to be able to impersonate users and groups. 
The user or group that logs in to the management cluster needs appropriate permissions to certain resources of the remote cluster.
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-canaries +subjects: +- kind: User + name: wego-admin # User logged in management cluster, impersonated via service account + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: canary-reader + apiGroup: rbac.authorization.k8s.io +``` + +You may need to add more users/groups to the `read-canaries` ClusterRoleBinding to ensure additional users can view canary information from within the Weave GitOps Enterprise UI. \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/feedback-and-telemetry.md b/website/versioned_docs/version-0.12.0/feedback-and-telemetry.md new file mode 100644 index 0000000000..5517211fb3 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/feedback-and-telemetry.md @@ -0,0 +1,87 @@ +--- +title: Feedback and Telemetry +sidebar_position: 8 +hide_title: true +--- + +## Feedback + +We ❤️ your comments and suggestions as we look to make successfully adopting a cloud-native approach, to application deployment on Kubernetes with GitOps, easier and easier. There are a number of ways you can reach out: + +- Raise an [issue](https://github.com/weaveworks/weave-gitops/issues) +- Invite yourself to the Weave Users Slack. +- Chat to us on the [#weave-gitops](https://weave-community.slack.com/messages/weave-gitops/) slack channel. +- Set up time with one of our team: [David](https://calendly.com/david-harris-weaveworks) - Product Manager (UK) or [James](https://calendly.com/james-weave-works/product-interview) - Product Director (US - East Coast) +- Come along to one of our [events](https://www.meetup.com/Weave-User-Group/) + +## Anonymous Aggregate User Behavior Analytics + +Weaveworks is utilizing [Pendo](https://www.pendo.io/), a product-analytics app, to gather anonymous user behavior analytics for both Weave GitOps and Weave GitOps Enterprise. 
We use this data so we can understand what you love about Weave GitOps, and areas we can improve. + +Weave GitOps OSS users will be notified when you create the dashboard for the first time via **gitops create dashboard** or when you use **gitops run** for the first time and decide to install the dashboard via that functionality. Analytics will not be enabled until after this notification so that you can opt out before sending analytics data. + +For Weave GitOps Enterprise users, this functionality is turned on by default. Further below we go into more detail about how you can control this functionality. + +### Why are we collecting this data? + +We want to ensure that we are designing the best features, addressing the most pressing bugs, and prioritizing our roadmap appropriately for our users. Collecting analytics on our users’ behaviors gives us valuable insights and allows us to conduct analyses on user behavior within the product. This is important for us so we can make informed decisions- based on how, where and when our users use Weave GitOps - and prioritize what is most important to users like you. + +#### For example: + +We’d like to understand the usage of the graph and dependency tabs within the dashboard. If users are utilizing this feature, we would like to understand the value and how we can improve that feature. However, if users aren’t using it, we can conduct research to understand why and either fix it, or come to the conclusion that it really doesn’t serve any utility and focus our efforts on more valuable features. + +### How long is the collected data stored? + +Weave GitOps’s anonymous user and event data has a 24 month retention policy. The default value for data retention in Pendo is 7 years. For more information on Pendo’s data storage policies, [click here](https://support.pendo.io/hc/en-us/articles/360051268732-Subscription-Data-Retention-Limit). + +### What are we collecting? 
+ +Weave GitOps gathers data on how the CLI and Web UI are used. There is no way for us or Pendo to connect our IDs to individual users or sites. + +For the CLI, we gather usage data on: +- The specific sub command itself - e.g. `gitops get bcrypt-hash` +- The name of the flags used, without the value (e.g. `--password`, but not the value) +- A random string used as an anonymous user ID, stored on your machine +- - **Note: We have no way of tracking individual users.** We can only distinguish between user counts and event counts +- Whether the user has installed the Enterprise or open-source version of the CLI +- A value of `app=cli`, to know it’s a CLI metric + +For the Web UI, we gather usage data on: +- Your browser, version, and user agent +- The domain name of your server +- Every page interaction, and the time each page is left open +- All button interactions +- The complete URL of every page, including which resource you look at, and searches done +- We can push new content into your browser, to add questions, guides, or more data points +- We send a unique user hash, based on your user name +- - **Note: We are not able to cross-reference unique users** between here and anywhere else - not even your command line - but it gives us the ability to distinguish between user counts and event counts. +- Finally, we include a persistent ID representing your cluster, based on a hash of your `kube-system` namespace uuid +- - **Note: There is no way for us to track individual clusters** using this, but it gives us the ability to distinguish between cluster counts and event counts. + +### When is the data collected and where is it sent? + +Weave GitOps CLI analytics are sent at startup. The dashboard analytics are sent through its execution. Both CLI and Dashboard analytics are sent to Pendo over HTTPS. + +### How? + +The CLI code is viewable in pkg/analytics. It will ignore any errors, e.g. if you don’t have any network connection. 
+ +The dashboard setup code is viewable in ui/components/Pendo.tsx - this will fetch a 3rd party javascript from Pendo’s servers. + +### Opting out + +All the data collected, analytics, and feedback are for the sole purpose of creating better product experience for you and your teams. We would really appreciate it if you left the analytics on as it helps us prioritize which features to build next and what features to improve. However, if you do want to opt out of Weave GitOps’s analytics you can opt out of CLI and/or Dashboard analytics. + +#### CLI + +We have created a command to make it easy to turn analytics on or off for the CLI. + +**To disable analytics:** +*gitops set config analytics false* + +**To enable analytics:** +*gitops set config analytics true* + +#### Dashboard + +You need to update your helm release to remove `WEAVE_GITOPS_FEATURE_TELEMETRY` from the `envVars` value. diff --git a/website/versioned_docs/version-0.12.0/getting-started.mdx b/website/versioned_docs/version-0.12.0/getting-started.mdx new file mode 100644 index 0000000000..f71b69b7a1 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/getting-started.mdx @@ -0,0 +1,245 @@ +--- +title: Getting Started +sidebar_position: 2 +hide_title: true +--- + +# Getting Started with Weave GitOps + +This hands-on guide will introduce you to the basics of the GitOps Dashboard web UI, to help you understand the state of your system, before deploying a new application to your cluster. It is adapted from this guide - [Flux - Getting Started](https://fluxcd.io/docs/get-started/). + +If you haven't already, be sure to check out our [introduction](./intro.md) to Weave GitOps and our [installation docs](./installation/index.mdx). + +## Part 1 - Weave GitOps overview + +Weave GitOps provides insights into your application deployments, and makes continuous delivery with GitOps easier to adopt and scale across your teams. 
We will now login to the dashboard web UI and start to explore the state of our GitOps deployments. + +### Login to the GitOps Dashboard + +1. Expose the service running on the cluster + + ``` + kubectl port-forward svc/ww-gitops-weave-gitops -n flux-system 9001:9001 + ``` + +1. [Open the dashboard](http://localhost:9001/) and login using either the cluster user or OIDC based on your [configuration](./configuration/securing-access-to-the-dashboard.mdx). If you followed the example above, the username will be `admin`, and the password is the non-encrypted value you provided as $PASSWORD. + + ![Weave GitOps login screen](/img/dashboard-login.png) + +### Applications view + +When you login to the dashboard you are brought to the Applications view, which allows you to quickly understand the state of your deployments across a cluster at a glance. It shows summary information from `kustomization` and `helmrelease` objects. + +![Applications summary view showing Flux System and Weave GitOps deployments](/img/dashboard-applications-overview.png) + +In the above screenshot you can see: +- a `Kustomization` called `flux-system`, which was created when Flux was bootstrapped onto the Cluster, and is deploying the GitOps Toolkit controllers. It is also deploying further Flux objects defined in the same repo, so that Flux will deploy additional workloads which includes our Helm Chart for Weave GitOps. +- a `HelmRelease` called `ww-gitops` which deploys the aforementioned Helm Chart. + +This table view shows the reported status so you can understand whether a reconciliation has been successful, and when they have last been updated. You can also see where the Flux objects are deployed and which `Source` object they are reconciling from; clicking the name of the Source it will take you to a detail view for the given source object. The view automatically updates every few seconds so you know the current state of your system. 
+ +You can search for and filter objects by `Name` by clicking the magnifying glass, or filter by `Type` by clicking the strawberry icon to its right. + +Clicking the Name of an object will take you to a detailed view for the given Kustomization or HelmRelease. Which we will explore in a moment. + +### The Sources view + +Clicking on Sources in the left hand menu will bring you to the Sources view. This view shows you where flux pulls its application definitions from, for example Git repositories, and the current state of that synchronization. This shows summary information from `gitrepository`, `helmrepository`, `helmchart` and `bucket` objects. + +![Sources summary view showing Flux System and Weave GitOps sources](/img/dashboard-sources.png) + +In the above screenshot you can see: +- a `GitRepository` called `flux-system`, which was created when Flux was bootstrapped onto the Cluster, and contains the manifests for the GitOps Toolkit and Weave GitOps and various Flux objects. +- a `HelmChart` called `flux-system-ww-gitops`, which is automatically created by Flux when you define a `HelmRelease` to deploy a Helm Chart from a given source. +- a `HelmRepository` called `ww-gitops` which pulls from the Helm Repository where the Weave GitOps Helm Chart is published. + +The table view again shows summary status information so you can see whether Flux has been able to successfully pull from a given source and which specific commit was last detected. It shows key information like the `Interval`, namely how frequently Flux will check for updates in a given source location. You can apply filtering as per the Applications view, can click the `URL` to navigate to a given source i.e. a repository in GitHub, or the `Name` of a `Source` to view more details about it. + + +### The Flux Runtime view + +Clicking on `Flux Runtime` provides information on the GitOps engine, which is continuously reconciling your desired and live state. 
It provides two different tabs: controllers and CRDs. + +#### Controllers + +The controllers tab shows your installed GitOps Toolkit Controllers and their version. + +![Flux Runtime view showing the various GitOps Toolkit controllers](/img/dashboard-flux-runtime.png) + +By default `flux bootstrap` will install the following controllers: +- helm-controller +- kustomize-controller +- notification-controller +- source-controller + +For a full description of the controllers, see [GitOps ToolKit components](https://fluxcd.io/docs/components/) in the Flux documentation. + +From this view you can see whether the controllers are healthy and which version of a given component is currently deployed. + +#### CRDs + +The CRD tab lists the custom resources that the GitOps Toolkit Controllers use. This helps you see what resources you will be able to create. + +![Flux Runtime view showing the various GitOps Toolkit controllers](/img/dashboard-flux-runtime-crd.png) + +### Exploring the flux-system deployment + +Let's explore the `flux-system` kustomization. Navigate back to the `Applications` view and click on the `flux-system` object. + +![Application detail view for the flux system kustomization](/img/dashboard-application-flux.png) + +After a few moments loading the data, you should see similar to the above screenshot. From here you can see key information about how this resource is defined: which `Source` it is reading from, the latest applied commit, the exact path with the Source repository that is being deployed, and the `Interval` in which Flux will look to reconcile any difference between the declared and live state - i.e. if a kubectl patch had been applied on the cluster, it would effectively be reverted. If a longer error message was being reported by this object, you would be able to see it in its entirety on this page. 
+ +Underneath the summary information are five tabs: + +- Details (default) is a table view which shows all the Kubernetes objects (including flux objects, deployments, pods, services, etc) managed and deployed through this `kustomization`. +- Events (shown below) shows any related Kubernetes events to help you diagnose issues and understand health over time. +- Reconciliation Graph (shown below) provides a directional graph alternative to the Details view to help you understand how the various objects relate to each other. +- Dependencies provides a directional graph that helps you understand dependencies between objects, if there are any. This helps you make sure that your automations are set up in the correct order. +- Yaml (shown below) provides a raw dump on the current object as it currently exists inside your cluster. Note that this will be different from what's in your gitops repository, since this yaml view will contain the current status of the object. + +**Events tab** +![Application detail view showing events for an object](/img/dashboard-application-events.png) + +**Reconciliation Graph tab** +![Application detail view showing reconciliation graph - a directional graph showing object relationships](/img/dashboard-application-reconciliation.png) + +**Yaml tab** +![Application detail view showing the yaml display](/img/dashboard-application-yaml.png) + +#### Source details view +Finally, let's look at the Source in more detail - go back to the Details tab, and click `GitRepository/flux-system` from the summary at the top of the page. + +![Source detail view showing details for an object](/img/dashboard-source-flux.png) + +As with an Application detail view, you can see key information about how the resource is defined. Then beneath, alongside the Events tab, is a Related Automations view. This shows all the `kustomization` objects which have this object as their Source. 
+ + +## Part 2 - Deploying and viewing podinfo application + +Now that you have a feel for how to navigate the dashboard. Let's deploy a new application and explore that as well. In this section we will use the [podinfo](https://github.com/stefanprodan/podinfo) sample web application. + +### Deploying podinfo + +1. Clone or navigate back to your git repository where you have bootstrapped Flux, for example: + + ``` + git clone https://github.com/$GITHUB_USER/fleet-infra + cd fleet-infra + ``` + +1. Create a `GitRepository` Source for podinfo + + ``` + flux create source git podinfo \ + --url=https://github.com/stefanprodan/podinfo \ + --branch=master \ + --interval=30s \ + --export > ./clusters/my-cluster/podinfo-source.yaml + ``` + +1. Commit and push the `podinfo-source` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add podinfo source" + git push + ``` + +1. Create a `kustomization` to build and apply the podinfo manifest + + ``` + flux create kustomization podinfo \ + --target-namespace=flux-system \ + --source=podinfo \ + --path="./kustomize" \ + --prune=true \ + --interval=5m \ + --export > ./clusters/my-cluster/podinfo-kustomization.yaml + ``` + +1. Commit and push the `podinfo-kustomization` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add podinfo kustomization" + git push + ``` + +### View the application in Weave GitOps + +Flux will detect the updated `fleet-infra` and add podinfo. If we navigate back to the [dashboard](http://localhost:9001/) you should see the podinfo application appear. + +![Applications summary view showing Flux System, Weave GitOps and Podinfo](/img/dashboard-applications-with-podinfo.png) + +Click on podinfo and you will see details about the deployment, including that there is 1 pod available. 
+ +![Applications details view for podinfo showing 1 pods](/img/dashboard-podinfo-details.png) + +### Customize podinfo + +To customize a deployment from a repository you don’t control, you can use Flux in-line patches. The following example shows how to use in-line patches to change the podinfo deployment. + +1. Add the `patches` section as shown below to the field spec of your podinfo-kustomization.yaml file so it looks like this: + + ```yaml title="./clusters/my-cluster/podinfo-kustomization.yaml" + --- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: podinfo + namespace: flux-system + spec: + interval: 60m0s + path: ./kustomize + prune: true + sourceRef: + kind: GitRepository + name: podinfo + targetNamespace: flux-system + // highlight-start + patches: + - patch: |- + apiVersion: autoscaling/v2beta2 + kind: HorizontalPodAutoscaler + metadata: + name: podinfo + spec: + minReplicas: 3 + target: + name: podinfo + kind: HorizontalPodAutoscaler + // highlight-end + ``` + +1. Commit and push the podinfo-kustomization.yaml changes: + + ``` + git add -A && git commit -m "Increase podinfo minimum replicas" + git push + ``` + +3. Navigate back to the dashboard and you will now see 2 newly created pods + + ![Applications details view for podinfo showing 3 pods](/img/dashboard-podinfo-updated.png) + + +### Suspend updates + +Suspending updates to a kustomization allows you to directly edit objects applied from a kustomization, without your changes being reverted by the state in Git. 
+ +To suspend updates for a kustomization, from the details page, click on the suspend button at the top, and you should see it be suspended: + +![Podinfo details showing Podinfo suspended](/img/dashboard-podinfo-details-suspended.png) + +This shows in the applications view with a yellow warning status indicating it is now suspended. + +![Applications summary view showing Podinfo suspended](/img/dashboard-podinfo-suspended.png) + +To resume updates, go back to the details page, click the resume button, and after a few seconds reconciliation will continue: + +![Applications details view for podinfo being resumed](/img/dashboard-podinfo-updated.png) + +## Complete! + +Congratulations 🎉🎉🎉 + +You've now completed the getting started guide. We would welcome any and all [feedback](feedback-and-telemetry.md) on your experience. diff --git a/website/versioned_docs/version-0.12.0/gitops-run/_category_.json b/website/versioned_docs/version-0.12.0/gitops-run/_category_.json new file mode 100644 index 0000000000..cc3f1095f2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/gitops-run/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "GitOps Run", + "position": 5 +} + \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/gitops-run/get-started.mdx b/website/versioned_docs/version-0.12.0/gitops-run/get-started.mdx new file mode 100644 index 0000000000..d635d9d49b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/gitops-run/get-started.mdx @@ -0,0 +1,267 @@ +--- +title: Get Started +sidebar_position: 2 +hide_title: true +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Get Started with GitOps Run + +GitOps Run supports two different modes of operation - directly on a +cluster or as sandboxed sessions. The sandboxed sessions are intended +for shared environments where multiple users are running their own +sessions, whereas the direct mode is intended for a local cluster. 
+ +## Direct mode + +### Prerequisites +#### Required +- Install the GitOps CLI. See [the installation](../installation/weave-gitops.mdx#gitops-cli) + +#### Optional +- This guide uses [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) for demonstrations, but it is not required to use GitOps Run +- The [Flux CLI](https://fluxcd.io/flux/installation/) is the quickest way to generate resource definitions, but the files can also be created manually + +### Kubernetes cluster +To get started with GitOps Run, you need a Kubernetes cluster. There +are many tools to set up a local cluster for test and development +purposes. + +**Note:** this tutorial assumes you have full control of your cluster - we +recommend a local cluster, but you can also use a remote cluster where +you have full `cluster-admin` privileges. + + + + +Install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and run + +```bash +kind create cluster +``` + + + +Install [k3d](https://k3d.io/) and run + +```bash +k3d cluster create mycluster +``` + + + +Install [minikube](https://minikube.sigs.k8s.io/docs/start/) and run + +```bash +minikube start +``` + + + +Install [Docker Desktop](https://www.docker.com/products/docker-desktop/) and enable Kubernetes. Then run + +``` +kubectl config set-context docker-desktop +``` + + + +GitOps Run works on any Kubernetes platform, but to avoid accidents +you have to explicitly white-list the context name. + +First, find the name of the context where you want to run `gitops beta run` - in this example, there's a cluster with the name "dev": + +```bash +$ kubectl config get-contexts +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* dev dev dev +``` + +Then, for any `gitops beta run` command in this guide, you'll have to add the flag `--allow-k8s-context=dev` + + + +Before you continue, make sure `kubectl get nodes` returns a node which is `Ready`. + +### Git repository + +You need to set up a Git repository to put your GitOps manifests +in. 
Any Git repository would do, for example create a new +[github](https://github.com/new) repository and clone that. + +### Set up GitOps Run + +To start GitOps Run, point it at the directory you want to keep your +application manifests. In this example, we will install a small demo application +[podinfo](https://github.com/stefanprodan/podinfo). + +We will start it with `--no-session` as it's a single user +cluster which we want to use in direct mode. The port-forward points +at the podinfo pod we will create later. + +``` +gitops beta run ./podinfo --no-session --port-forward namespace=dev,resource=svc/dev-podinfo,port=9898:9898 +``` + +You will now be asked if you want to install Flux and the GitOps +[dashboard](../getting-started.mdx). Answer yes and set a password, and +shortly after you should be able to [open the dashboard](http://localhost:9001) +to see what's in your cluster - including the resources that GitOps +Run is operating. + +### Start modifying + +GitOps Run will have created a directory `podinfo` in your +current directory. Inside that directory, there will only be a single +file `kustomization.yaml`. + +To create the podinfo automation, we have to create the resources to +run podinfo - we'll create a new `Namespace`, a `HelmRepository` that +references the Helm repository where the manifests are stored, and a +`HelmRelease` that references the chart and version. We can use the +`flux` CLI to generate the resource definition, or we can just create +the yaml files ourselves. 
+ + +```bash +cat <<EOF > ./podinfo/namespace.yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dev +EOF +flux create source helm podinfo --url=https://stefanprodan.github.io/podinfo --namespace=dev --export > ./podinfo/podinfo-source.yaml +flux create helmrelease podinfo --source=HelmRepository/podinfo --chart=podinfo --export --namespace=dev --target-namespace=dev > ./podinfo/podinfo-helmrelease.yaml +``` + + +```yaml title="./podinfo/namespace.yaml" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dev +``` +```yaml title="./podinfo/podinfo-source.yaml" +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: podinfo + namespace: dev +spec: + interval: 1m0s + url: https://stefanprodan.github.io/podinfo +``` + +```yaml title="./podinfo/podinfo-helmrelease.yaml" +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: podinfo + namespace: dev +spec: + chart: + spec: + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + interval: 1m0s + targetNamespace: dev +``` + + + +The only remaining step is to import these files in the auto-generated +`kustomization.yaml`. Open it up, and you should see the following: + +```yaml title="./podinfo/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] # 👋 Start adding the resources you want to sync here +``` + +Change the last line so it instead looks like the following: + +```yaml title="./podinfo/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +// highlight-start +resources: + - namespace.yaml + - podinfo-source.yaml + - podinfo-helmrelease.yaml +// highlight-end +``` + +GitOps Run should now automatically upload these manifests and install +them. The dashboard should show you how the resources are being +reconciled, and when they're Ready you will be able to see podinfo +[here](http://localhost:9898). 
+ + +### Update your app + +Now that GitOps Run is continuously watching and reconciling your +local files onto your cluster, we can start modifying the resources. + +We're going to be modifying the podinfo we set up in the previous +step. Open the current [podinfo](http://localhost:9898) and pay +attention to the background color. + +Now, open your HelmRelease file and add the values at the bottom, as +indicated: + +```yaml title="./podinfo/podinfo-helmrelease.yaml" +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: podinfo + namespace: dev +spec: + chart: + spec: + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + interval: 1m0s + targetNamespace: dev +// highlight-start + values: + ui: + color: "#C32148" +// highlight-end +``` + +When you hit save, you'll see GitOps Run upload new files, and once +it's reconciled the podinfo has changed to a bright red. + +### Turn on GitOps Mode + +Now that we've used this interactive environment to set up the +resources we want, we can switch over to full GitOps mode, where Flux +is permanently pulling from your own Git repository. + +The first run, when you turn off GitOps Run, it will ask you if you +want to bootstrap your cluster into GitOps mode. If you answer yes, it +will take you through a wizard to help you set up how you want it to +run - what repository, which branch, etc. + +When you hit submit, it will set up the repository and branch, add +Flux manifests, as well as the files you were just working on. From +this point on, you can make persistent changes by pushing them to this +repository. 
diff --git a/website/versioned_docs/version-0.12.0/gitops-run/overview.mdx b/website/versioned_docs/version-0.12.0/gitops-run/overview.mdx new file mode 100644 index 0000000000..63be0c11a0 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/gitops-run/overview.mdx @@ -0,0 +1,71 @@ +--- +title: Overview +sidebar_position: 1 +hide_title: true +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# GitOps Run Overview + +## Introduction + +GitOps is a powerful mechanism for creating consistent environments and having +multiple clusters stay in sync. If you build out your infrastructure correctly +you get predictable behaviours for your teams and you can have new environments +up and running quickly. However, GitOps can be challenging for the everyday developer +to work with and it can create some friction, especially for developers who are +less familiar with Kubernetes or Flux. + +The purpose of GitOps Run is to remove the complexity for developers so that +platform operators can create developer environments easily, and application developers +can benefit from GitOps and focus on writing code. + +Watch this video to learn more about how GitOps Run can help your team +get started with GitOps: + + + +### Additional Benefits +* No need to run `kubectl`, `helm`, `kustomize`, or `flux` CLI commands. Just create the manifests and we'll put them on the cluster for you. +* Reduces the cycle time when configuring your cluster. With normal GitOps there are a lot of commit/push/reconcile workflows that can be frustrating. This skips that and you can test your changes directly before committing and pushing code to your Git repository. +* Multiple options for debugging Flux such as using the Dashboard that comes with Weave GitOps or getting live feedback by leveraging the [GitOps Tools for Flux](https://marketplace.visualstudio.com/items?itemName=Weaveworks.vscode-gitops-tools) VSCode extension. 
+ +## Terminology + +### Modes + +#### GitOps: +This is the default mode we are always aiming for when using Weave GitOps. Whenever GitOps Run +is not active we want users to be in this mode. This means that the cluster is being driven by +some mechanism reading from Git, ideally Flux, and that system is applying those changes +to the cluster. + +#### Run: +This is when the cluster has GitOps Run running on the cluster. There is a live reload session +that is occurring and the cluster is no longer in a pure GitOps or Snowflake mode. Ideally, when +GitOps Run stops running, the cluster enters the GitOps mode that is defined above. + +#### Snowflake: +We are referring to a cluster that is driven by some other mechanism outside of GitOps or Run. +For example, a platform operator could have run various kubectl apply commands and installed +a couple helm charts using helm. The only way for this cluster to reach this state again is to +rerun those commands or to transition to GitOps mode. + +### Sessions + +Weave GitOps Run has two different ways of interacting with your cluster. + +#### Sandboxed + +First, is choosing a session. +This means we spin up a virtual cluster on your cluster creating a sandbox environment for your applications. +What this means is that you are running this application in an isolated environment and it will not impact the +rest of your cluster. When you are done and turn off GitOps Run we will then clean up the virtual cluster and +everything that was installed on it. You can push your changes to Git and then our system will take care of +pulling those changes onto the cluster. + +#### Cluster +When you pass the `--no-session` flag when starting up GitOps Run this means we do not put those payloads in +their own sandboxed environment. We will load them up directly into the cluster just as you would any other app. 
diff --git a/website/versioned_docs/version-0.12.0/gitops-templates/_category_.json b/website/versioned_docs/version-0.12.0/gitops-templates/_category_.json new file mode 100644 index 0000000000..c394b0548f --- /dev/null +++ b/website/versioned_docs/version-0.12.0/gitops-templates/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Templates", + "position": 6 +} \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/gitops-templates/templates.mdx b/website/versioned_docs/version-0.12.0/gitops-templates/templates.mdx new file mode 100644 index 0000000000..4ff8d4280b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/gitops-templates/templates.mdx @@ -0,0 +1,320 @@ +--- +title: Using templates +sidebar_position: 3 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +# Why GitOps templates +GitOpsTemplates enables Application Developers to self-service components and services using Weave GitOps. +Turning knowledge into a library that can be self-served. + +## What are GitOps templates? +GitOps templates allow you to template resources in a single definition. Resources in a template can be anything that can be expressed in yaml (K8s, Flux primitives, TF controller, Crossplane, Cluster API). + + +**FAQ** + +**What are GitOps templates?** +GitOps templates allow you to template resources in a single definition. Resources in a template can be anything that can be expressed in yaml (K8s, Flux primitives, TF controller, Crossplane, Cluster API). +Templates are simple YAML files that can be enriched with Parameters, Variables, Metadata and conditions. They can be rendered to create the resources they contain. For clusters it can be CAPI objects like MachinePool. It can be as well Kustomization (flux) or a TF controller resource. + +**Ok, what are the restrictions on GitOps templates?** + +Basically, the only restriction is that the template needs to be valid YAML. 
Besides that, a rendered template can create any kind of resource. + +**How do they fit today into Weave GitOps?** + +We have added some metadata markup, which helps us to render the template nicely in the GUI. + +The template consumer will be only provided with the required Parameters/Inputs and the guardrails, the template gets rendered and we create a PR. Merging the PR will create all the templated resources. + +**How can I use GitOps templates?** + +GitOps Templates were originally introduced to enable self-service in the cluster creation flow. We quickly extended that to terraform, crossplane and Kubernetes resources like RBAC (Roles + Rolebindings). +You can have for example a template that provides a running Developer Environment, consisting of an EKS cluster, an RDS database, and a branch + revision of the current application through a single template. + +## Organizing Templates + +Declare the type of a template by using the `weave.works/template-type` label. The value of the label is the name of the template type. The template type is used to group templates in the UI. + +Recommended template types: +- `application` - for application templates +- `cluster` - for cluster templates +- `terraform` - for Terraform templates +- `pipeline` - for Pipeline templates + +## Enabling/Disabling Template Components + +Enable or disable rendering of certain component sections in a template with the use of annotations. This can be done by using the `templates.weave.works/COMPONENT-enabled` annotation with a boolean value. 
+ +Supported components: +- `profiles` +- `kustomizations` +- `credentials` + +Example: +``` +annotations: + templates.weave.works/profiles-enabled: "true" + templates.weave.works/kustomizations-enabled: "true" + templates.weave.works/credentials-enabled: "true" +``` + +## Default profile values + +Default and required profiles can be added via the template annotation `capi.weave.works/profile-INDEX` so that if the profiles section has been enabled on a template you can choose those profiles for the template. + +The annotation is added as the following: +``` +annotations: + capi.weave.works/profile-0: '{"name": "NAME", "version": "VERSION", "editable": EDITABLE, "namespace": "NAMESPACE"}' +``` +Where + - `name` - is the name of the profile in the default profiles repository + - `version` - (optional) will choose the default version + - `namespace` - (optional) is the default target namespace for the profile + - `editable` - (optional, default=false), allow the user to de-select this profile, making it a default instead of a requirement. + +## Rendering Templates + +Declare the render type indicating the templating language to be used to render the template by setting `spec.renderType`. + +Supported templating languages: + - **envsubst (default)** + envsubst which is short for environment substitution uses [envsubst](https://github.com/a8m/envsubst) for rendering, where `${CLUSTER_NAME}` style syntax can be used. It is the same templating format that is used by [clusterctl](https://cluster-api.sigs.k8s.io/clusterctl/overview.html). 
+ + #### Supported Functions + + | __Expression__ | __Meaning__ | + | ----------------- | -------------- | + | `${var}` | Value of `$var` + | `${#var}` | String length of `$var` + | `${var^}` | Uppercase first character of `$var` + | `${var^^}` | Uppercase all characters in `$var` + | `${var,}` | Lowercase first character of `$var` + | `${var,,}` | Lowercase all characters in `$var` + | `${var:n}` | Offset `$var` `n` characters from start + | `${var:n:len}` | Offset `$var` `n` characters with max length of `len` + | `${var#pattern}` | Strip shortest `pattern` match from start + | `${var##pattern}` | Strip longest `pattern` match from start + | `${var%pattern}` | Strip shortest `pattern` match from end + | `${var%%pattern}` | Strip longest `pattern` match from end + | `${var-default}` | If `$var` is not set, evaluate expression as `$default` + | `${var:-default}` | If `$var` is not set or is empty, evaluate expression as `$default` + | `${var=default}` | If `$var` is not set, evaluate expression as `$default` + | `${var:=default}` | If `$var` is not set or is empty, evaluate expression as `$default` + | `${var/pattern/replacement}` | Replace as few `pattern` matches as possible with `replacement` + | `${var//pattern/replacement}` | Replace as many `pattern` matches as possible with `replacement` + | `${var/#pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` start + | `${var/%pattern/replacement}` | Replace `pattern` match with `replacement` from `$var` end + + - **templating** + + templating uses text/templating for rendering, using go-templating style syntax `{{ .params.CLUSTER_NAME }}` where params are provided by the `.params` variable. + Template functions can also be used with the syntax `{{ .params.CLUSTER_NAME | FUNCTION }}`. 
+ + #### Supported functions [(from Sprig library)](http://masterminds.github.io/sprig/) + + | __Function Type__ | __Functions__ | + | ----------------- | -------------- | + | String Functions | *trim*, *wrap*, *randAlpha*, *plural* + | String List Functions | *splitList*, *sortAlpha* + | Integer Math Functions | *add*, *max*, *mul* + | Integer Slice Functions | *until*, untilStep + | Float Math Functions | *addf*, *maxf*, *mulf* + | Date Functions | *now*, *date* + | Defaults Functions | *default*, *empty*, *coalesce*, *fromJson*, *toJson*, *toPrettyJson*, *toRawJson*, ternary + | Encoding Functions | *b64enc*, *b64dec* + | Lists and List Functions | *list*, *first*, *uniq* + | Dictionaries and Dict Functions | *get*, *set*, *dict*, *hasKey*, *pluck*, *dig*, *deepCopy* + | Type Conversion Functions | *atoi*, *int64*, *toString* + | Flow Control Functions | *fail* + | UUID Functions | *uuidv4* + | Version Comparison Functions | *semver*, semverCompare + | Reflection | *typeOf*, *kindIs*, *typeIsLike* + +### Editing templates + +When rendering a template, a `templates.weave.works/create-request` annotation is added by default to the first resource in the `resourcetemplates`. It can be added to any other resource by simply adding the annotation in empty form. This annotation holds information about which template generated the resource and the parameter values used as a json string. 
+ +If the resource type is one of the following and has this annotation, an `Edit resource` button will appear in the UI that allows the editing of the resource and re-rendering it: +- Applications: + - HelmRelease + - Kustomization +- Sources: + - HelmRepository + - GitRepository +- Clusters: + - GitopsCluster + +Example: +```yaml +spec: + resourcetemplates: + - apiVersion: v1 + kind: ConfigMap + metadata: + name: my-configmap + data: + my-key: my-value + - apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + # This annotation will add an `Edit resource` button in the UI for this resource + annotations: + templates.weave.works/create-request: '' + name: nginx + namespace: default +``` + +## Custom delimiters for `renderType: templating` + +The default delimiters for `renderType: templating` are `{{` and `}}`. These can be changed by setting the `templates.weave.works/delimiters` annotation on the profile. For example: + +- `templates.weave.works/delimiters: "{{,}}"` - default +- `templates.weave.works/delimiters: "${{,}}"` + - Use `${{` and `}}`, for example `${{ .params.CLUSTER_NAME }}` + - Useful as `{{` in yaml is invalid syntax and needs to be quoted. If you need to provide a un-quoted number value like `replicas: 3` you should use these delimiters. + - :x: `replicas: {{ .params.REPLICAS }}` Invalid yaml + - :x: `replicas: "{{ .params.REPLICAS }}"` Valid yaml, incorrect type. The type is a `string` not a `number` and will fail validation. + - :white_check_mark: `replicas: ${{ .params.REPLICAS }}` Valid yaml and correct `number` type. +- `templates.weave.works/delimiters: "<<,>>" ` + - Use `<<` and `>>`, for example `<< .params.CLUSTER_NAME >>` + - Useful if you are nesting templates and need to differentiate between the delimiters used in the inner and outer templates. 
+ +## Modifying the rendered resources + +### The `add-common-bases` annotation + +The `templates.weave.works/add-common-bases: "true"` annotation can be used to +enable and disable the addition of a "common bases" `Kustomization` to the +list of rendered files. +This kustomization will sync a path that is common to all clusters (`clusters/bases`). Useful to add RBAC and policy that should be applied to all clusters. + +### The `inject-prune-annotation` annotation + +The `templates.weave.works/inject-prune-annotation: "true"` annotation can be used to +enable and disable the injection of Flux's `prune` annotation into certain resources. + +When enabled we automatically inject a `kustomize.toolkit.fluxcd.io/prune: disabled` +annotation into every resource in the `spec.resourcetemplates` that is not a +`cluster.x-k8s.io.Cluster` and not a `gitops.weave.works.GitopsCluster`. + +The intention here is to stop Flux from explicitly deleting subresources of the `Cluster` like +`AWSCluster`, `KubeadmControlPlane`, `AWSMachineTemplate` etc. and let the capi-controllers remove them themselves. + +This is the pattern recommended in the capi-quickstart guide https://cluster-api.sigs.k8s.io/user/quick-start.html#clean-up. + +## Differences between `CAPITemplate` and `GitOpsTemplate` + +The only difference between `CAPITemplate` and `GitOpsTemplate` is the default value of these two annotations: + +| Annotation | default value for `CAPITemplate` | default value for `GitOpsTemplate` | +| ----------- | ---------------- | ------------------ | +| `templates.weave.works/add-common-bases` | `"true"` | `"false"` | +| `templates.weave.works/inject-prune-annotation` | `"true"` | `"false"` | + +## How to: Add a GitOps Template to create a cluster + +GitOps Template objects need to be wrapped with the `GitOpsTemplate` custom resource and then loaded into the management cluster. 
+ +```yaml +apiVersion: clustertemplates.weave.works/v1alpha2 +kind: GitOpsTemplate +metadata: + name: cluster-template-development + labels: + weave.works/template-type: cluster +spec: + description: This is the std. CAPD template + renderType: templating + params: + - name: CLUSTER_NAME + description: This is used for the cluster naming. + resourcetemplates: + - apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "{{ .params.CLUSTER_NAME }}" +``` + + +## Parameters + +You can provide additional metadata about the parameters to the templates in the `spec.params` section. + +### Required Parameters + +Templates use the `CLUSTER_NAME` to determine the path in gitrepository and `RESOURCE_NAME` for the file name when we create the pull request. + +You must provide: +- `CLUSTER_NAME` +- `RESOURCE_NAME` + +**Default path for templates** + +The default path for a template has a few components: +- From the params: `CLUSTER_NAME` or `RESOURCE_NAME`, **required**. +- From the params: `NAMESPACE`, default: `default` +- From values.yaml for the Weave GitOps Enterprise `mccp` chart: `values.config.capi.repositoryPath`, default: `./clusters/management/clusters` + +These are composed to create the path: +`${repositoryPath}/${NAMESPACE}/${CLUSTER_OR_RESOURCE_NAME}.yaml` + +Using the default values and supplying `CLUSTER_NAME` as `my-cluster` will result in the path: +`./clusters/management/clusters/default/my-cluster.yaml` + +### Parameters metadata - `spec.params` + +- `name`: The variable name within the resource templates +- `description`: Description of the parameter. This will be rendered in the UI and CLI +- `options`: The list of possible values this parameter can be set to. 
+- `required` - Whether the parameter must contain a non-empty value +- `default` - Default value of the parameter + +Sample: +``` +spec: + params: + - name: PARAM_NAME_1 + description: DESC_1 + options: [OPTION_1,OPTION_2] + default: OPTION_1 + - name: PARAM_NAME_2 + description: DESC_2 + required: true + default: DEFAULT_2 +``` + + +### Loading the template into the cluster + +Load templates into the cluster by adding them to your Flux-managed Git repository or by using apply directly with +`kubectl apply -f capi-template.yaml` + +Weave GitOps will search for templates in the `default` namespace. This can be changed by configuring the `config.capi.namespace` value in the helm chart. + +## Full CAPD docker template example + +This example works with the CAPD provider, see [Cluster API Providers](../cluster-management/cluster-api-providers.mdx). + +import CodeBlock from "@theme/CodeBlock"; +import CapdTemplate from "!!raw-loader!../assets/templates/capd-template.yaml"; + + + {CapdTemplate} + + +## Updating from v1alpha1 to v1alpha2 + +The `resourcetemplates` value was changed in `v1alpha2`. Instead of directly holding a list of Kubernetes resource manifests it is now a list of a new object that has two keys: + +- content: This now holds the resource manifests. +- path: This allows you to specify which path to render the templates into in the repo instead of rendering them to the default path. + +If the user does not wish to specify where to render the templates but still wants to update and keep the old behavior, then they will have to move the resource templates list under the content section. 
diff --git a/website/versioned_docs/version-0.12.0/guides/_category_.json b/website/versioned_docs/version-0.12.0/guides/_category_.json new file mode 100644 index 0000000000..b6d4772488 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Guides", + "position": 4 +} diff --git a/website/versioned_docs/version-0.12.0/guides/assets/templates/capa-template.yaml b/website/versioned_docs/version-0.12.0/guides/assets/templates/capa-template.yaml new file mode 100644 index 0000000000..e4685ec07e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/assets/templates/capa-template.yaml @@ -0,0 +1,88 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: CAPITemplate +metadata: + name: aws-eks-dev + namespace: default + labels: + weave.works/template-type: cluster +spec: + description: AWS EKS Development Cluster + params: + - name: CLUSTER_NAME + description: The name for this cluster. + - name: AWS_REGION + description: AWS Region to create cluster + options: ['us-east-1','eu-central-1','eu-west-2','us-west-2'] + - name: KUBERNETES_VERSION + description: EKS Kubernetes version to use + options: ['v1.19.8','v1.20.7','v1.21.2'] + - name: WORKER_MACHINE_COUNT + description: Number of worker nodes to create. 
+ resourcetemplates: + - apiVersion: gitops.weave.works/v1alpha1 + kind: GitopsCluster + metadata: + name: "${CLUSTER_NAME}" + namespace: default + labels: + weave.works/capi: bootstrap + spec: + capiClusterRef: + name: "${CLUSTER_NAME}" + + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + name: ${CLUSTER_NAME} + namespace: default + labels: + weave.works/capi: bootstrap + spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + name: ${CLUSTER_NAME}-control-plane + + - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: AWSManagedControlPlane + metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default + spec: + region: ${AWS_REGION} + sshKeyName: default + version: ${KUBERNETES_VERSION} + eksClusterName: ${CLUSTER_NAME} + + - apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachinePool + metadata: + name: ${CLUSTER_NAME}-pool-0 + namespace: default + spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + dataSecretName: "" + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AWSManagedMachinePool + name: ${CLUSTER_NAME}-pool-0 + + - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AWSManagedMachinePool + metadata: + name: ${CLUSTER_NAME}-pool-0 + namespace: default + spec: {} diff --git a/website/versioned_docs/version-0.12.0/guides/cert-manager.md b/website/versioned_docs/version-0.12.0/guides/cert-manager.md new file mode 100644 index 0000000000..a5373352cb --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/cert-manager.md @@ -0,0 +1,98 @@ +--- +title: Generating TLS certificates with cert-manager and Let's Encrypt +sidebar_position: 2 +--- + 
+In this guide we will show you how to add cert-manager to a cluster bootstrapped with Weave GitOps, and how +to configure the use of Let's Encrypt to issue TLS certificates. + +### Pre-requisites + +- A Kubernetes cluster such as [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) cluster running a +[Flux-supported version of Kubernetes](https://fluxcd.io/docs/installation/#prerequisites) +- Weave GitOps is [installed](../installation/index.mdx) + +## What is cert-manager? + +[cert-manager](https://cert-manager.io/), a CNCF project, provides a way to automatically manage certificates +in Kubernetes and OpenShift clusters. "It will obtain certificates from a variety of Issuers, both popular public +Issuers as well as private Issuers, and ensure the certificates are valid and up-to-date, and will attempt to +renew certificates at a configured time before expiry". + +## Install cert-manager + +As cert-manager can be installed using a [Helm Chart](https://cert-manager.io/docs/installation/helm/), we can +simply create a `HelmRepository` and a `HelmRelease` to have Flux install everything. + +Commit the following to a location being reconciled by Flux. + +```yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 1h + url: https://charts.jetstack.io +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 5m + chart: + spec: + chart: cert-manager + version: 1.8.0 + sourceRef: + kind: HelmRepository + name: cert-manager + namespace: cert-manager + interval: 1m + values: + installCRDs: true +``` + +:::note cert-manager version +At time of writing, cert manager v1.8.0 was the latest available release and a newer version may exist, please +ensure to check for updates. 
+::: + +Now that `cert-manager` is running, we can create a `ClusterIssuer` to represent the certificate authority +from which we will obtain signed certificates, in this example we are using Let's Encrypt. After changing +the email address, commit this to the same location as above. + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: weave-gitops@example.tld + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource that will be used to store the account's private key. + name: letsencrypt-prod-account-key + solvers: + # Add a single challenge solver, HTTP01 using nginx + - http01: + ingress: + class: nginx +``` + +Once this `ClusterIssuer` resource is installed, the cluster is now configured to request and use certificates generated by Cert Manager. + +This could be manually requested through the creation of a [Certificate resource](https://cert-manager.io/docs/usage/certificate/#creating-certificate-resources) or configured to be automatic as shown in our [Configuring OIDC with Dex and GitHub](./setting-up-dex.md) guide. diff --git a/website/versioned_docs/version-0.12.0/guides/delivery.mdx b/website/versioned_docs/version-0.12.0/guides/delivery.mdx new file mode 100644 index 0000000000..96159162d1 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/delivery.mdx @@ -0,0 +1,603 @@ +--- +title: Progressive delivery using Flagger +hide_title: true +sidebar_position: 6 +--- + +import TierLabel from "../_components/TierLabel"; + +# Progressive delivery using Flagger + +[Flagger](https://docs.flagger.app/) is a progressive delivery operator for Kubernetes. 
It is +designed to reduce risks when introducing new software versions and to improve time to delivery +through automating production releases. Weave GitOps Enterprise's UI allows you to view the state of +these progressive delivery rollouts, and how they are configured using Flagger's +[canary](https://docs.flagger.app/usage/how-it-works#canary-resource) object, through the +Applications > Delivery view. + +![Applications Delivery view](/img/dashboard-applications-delivery.png) + +This guide uses Flux manifests to install Flagger and Linkerd. Flagger can work with a number of +service meshes and ingress controllers, to support various progressive delivery [deployment +strategies](https://docs.flagger.app/usage/deployment-strategies): + +![canary release icon](/img/canary.svg) **Canary Release** - where traffic is gradually shifted to +the new version and its performance is assessed. Based on this analysis of KPIs a release is either +promoted or the update abandoned. +![a b testing icon](/img/ab.svg) **A/B Testing** - uses HTTP headers or cookies to ensure users + stay on the same version of an application during the canary analysis. +![blue green testing icon](/img/blue-green.svg) **Blue/Green** - where tests are run against the + new version, and if successful, traffic is then switched from the current service. +![blue green mirroring icon](/img/mirroring.svg) **Blue/Green with Traffic Mirroring** - sends + copies of incoming requests to the new version. The user receives the response from the current + service and the other is discarded. The new version is promoted only if metrics are healthy. + +Using Flux allows us to manage our cluster applications in a declarative way through changes in a +Git repository. 
+ +In this guide, we will walk you through a full end-to-end scenario where you will: +- [Install the Linkerd service mesh](#installing-linkerd-using-flux) +- [Install Flagger](#installing-flagger-using-flux) +- [Deploy a sample application using a canary release strategy based on metrics provided through + Linkerd's in-built Prometheus instance](#deploy-a-canary-release) + +## Prerequisites +- This guide assumes you already have a Kubernetes cluster running and have bootstrapped Flux. To + apply the manifests listed in this guide, you will need to commit them to a repository being + reconciled with Flux. For help installing Flux, you can follow their [getting + started](https://fluxcd.io/docs/get-started/) documentation. +- Flagger requires the `autoscaling/v2` or `autoscaling/v2beta2` API to be installed on the cluster, you can use `kubectl + api-resources` to check which API versions are supported. +- The [step](https://smallstep.com/cli/) CLI installed to generate certificates in order to support + mTLS connections. + +## Installing Linkerd using Flux + +For the Linkerd installation, a Kustomization file will be used. This will allow us to specify the +installation order and the default namespace for the installed resources but also to easily generate +Secrets from certificate files via the use of a `secretGenerator`. + +In order to support mTLS connections between meshed pods, Linkerd requires a trust anchor +certificate and an issuer certificate with its corresponding key. These certificates are +automatically created when the `linkerd install` command is used but when using a Helm chart to +install Linkerd, these certificates need to be provided. The `step` CLI allows us to generate these +certificates. 
+ +To generate the trust anchor certificate run: +```bash +step certificate create root.linkerd.cluster.local ca.crt ca.key \ +--profile root-ca --no-password --insecure +``` + +To generate the issuer certificate run: +```bash +step certificate create identity.linkerd.cluster.local issuer.crt issuer.key \ +--profile intermediate-ca --not-after 8760h --no-password --insecure \ +--ca ca.crt --ca-key ca.key +``` + +Add the `ca.crt`, `issuer.crt` and `issuer.key` files to the cluster repository under a `linkerd` +directory. + +To control where the Linkerd components get installed, we need to add a Namespace resource: + +```yaml title="linkerd/namespace.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: linkerd + labels: + config.linkerd.io/admission-webhooks: disabled +``` + +Make the Linkerd Helm repository available in the cluster, by adding the following `HelmRepository` +manifest: + +```yaml title="linkerd/source.yaml" +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: linkerd +spec: + interval: 1h + url: https://helm.linkerd.io/stable +``` + +Then, to install the latest version of Linkerd, add the following `HelmRelease` manifests: + +```yaml title="linkerd/releases.yaml" +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: linkerd +spec: + interval: 10m + chart: + spec: + chart: linkerd2 + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: linkerd + install: + crds: Create + upgrade: + crds: CreateReplace + valuesFrom: + - kind: Secret + name: linkerd-certs + valuesKey: ca.crt + targetPath: identityTrustAnchorsPEM + - kind: Secret + name: linkerd-certs + valuesKey: issuer.crt + targetPath: identity.issuer.tls.crtPEM + - kind: Secret + name: linkerd-certs + valuesKey: issuer.key + targetPath: identity.issuer.tls.keyPEM + values: + installNamespace: false + identity: + issuer: + crtExpiry: "2023-07-18T20:00:00Z" # Change this to match generated certificate expiry date 
+--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: linkerd-viz +spec: + interval: 10m + dependsOn: + - name: linkerd + chart: + spec: + chart: linkerd-viz + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: linkerd +``` + +The value for the `spec.values.identity.issuer.crtExpiry` field above depends on the parameter value +used during the creation of the issuer certificate previously. In this example, it should be set to +1 year from the certificate creation. + +Then, add the following file to instruct Kustomize to patch any `Secrets` that are referenced in +`HelmRelease` manifests: + +```yaml title="linkerd/kustomizeconfig.yaml" +nameReference: + - kind: Secret + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease +``` + +Finally, add the following Kustomization file that references all the previous files that were +added: + +```yaml title="linkerd/kustomization.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: linkerd +configurations: +- kustomizeconfig.yaml +resources: +- namespace.yaml +- source.yaml +- releases.yaml +secretGenerator: + - name: linkerd-certs + files: + - ca.crt + - issuer.crt + - issuer.key +``` + +The `secretGenerator` is used to generate Secrets from the generated files. + +The `linkerd` directory in the cluster repository should look like this: + +```bash +> tree linkerd +linkerd +├── ca.crt +├── issuer.crt +├── issuer.key +├── kustomization.yaml +├── kustomizeconfig.yaml +├── namespace.yaml +├── releases.yaml +└── source.yaml +``` + +Once Flux reconciles this directory to the cluster, Linkerd should get installed. 
+ +Before proceeding to the next step, check that all the Linkerd pods have started successfully: + +```bash +> kubectl get pods -n linkerd +NAME READY STATUS RESTARTS AGE +linkerd-destination-66d5668b-4mw49 4/4 Running 0 10m +linkerd-identity-6b4658c74b-6nc97 2/2 Running 0 10m +linkerd-proxy-injector-6b76789cb4-8vqj4 2/2 Running 0 10m + +> kubectl get pods -n linkerd-viz +NAME READY STATUS RESTARTS AGE +grafana-db56d7cb4-xlnn4 2/2 Running 0 10m +metrics-api-595c7b564-724ps 2/2 Running 0 10m +prometheus-5d4dffff55-8fscd 2/2 Running 0 10m +tap-6dcb89d487-5ns8n 2/2 Running 0 10m +tap-injector-54895654bb-9xn7k 2/2 Running 0 10m +web-6b6f65dbc7-wltdg 2/2 Running 0 10m +``` + +:::note +Make sure that any new directories that you add to the cluster repository as part of this guide, +are included in a path that Flux reconciles. +::: + + +## Installing Flagger using Flux + +For the Flagger installation, a Kustomization file will be used to define the installation order and +provide a default namespace for the installed resources. + +Create a new `flagger` directory and make sure it is under a repository path that Flux reconciles. 
+ +Then, add a Namespace resource for Flagger: + +```yaml title="flagger/namespace.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: flagger +``` + +Then, to make the Flagger Helm repository available in the cluster, add the following +`HelmRepository` manifest: + +```yaml title="flagger/source.yaml" +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: flagger +spec: + interval: 1h + url: https://flagger.app +``` + +Then, to install the latest version of Flagger and the load tester app, which is used to generate +synthetic traffic during the analysis phase, add the following `HelmRelease` manifests: + +```yaml title="flagger/releases.yaml" +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: flagger +spec: + releaseName: flagger + install: + crds: Create + upgrade: + crds: CreateReplace + interval: 10m + chart: + spec: + chart: flagger + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: flagger + values: + metricsServer: http://prometheus.linkerd-viz:9090 + meshProvider: linkerd +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: loadtester +spec: + interval: 10m + chart: + spec: + chart: loadtester + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: flagger +``` + +Finally, add the following Kustomization file that references all the previous files that were +added: + +```yaml title="flagger/kustomization.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flagger +resources: +- namespace.yaml +- source.yaml +- releases.yaml +``` + +The `flagger` directory in the cluster repository should look like this: + +```bash +> tree flagger +flagger +├── kustomization.yaml +├── namespace.yaml +├── releases.yaml +└── source.yaml +``` + +Once Flux reconciles this directory to the cluster, Flagger and the load tester app should get +installed. 
+ +Before proceeding to the next step, check that all the Flagger pods have started successfully: + +```bash +> kubectl get pods -n flagger +NAME READY STATUS RESTARTS AGE +flagger-7d456d4fc7-knf2g 1/1 Running 0 4m +loadtester-855b4d77f6-scl6r 1/1 Running 0 4m +``` + +## Deploy a canary release + +To demonstrate the progressive rollout of an application, +[podinfo](https://github.com/stefanprodan/podinfo) will be used. + +We will configure a [Canary release +strategy](https://docs.flagger.app/usage/deployment-strategies#canary-release), where Flagger will +scale up a new version of the application (the canary), alongside the existing version (the +primary), and gradually increase traffic to the new version in increments of 5%, up to a maximum of +50%. It will continuously monitor the new version for an acceptable request response rate and +average request duration. Based on this analysis, Flagger will either update the primary to the new +version, or abandon the promotion; then scale the canary back down to zero. 
+ +Add a Namespace resource: + +```yaml title="test/namespace.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: test + annotations: + linkerd.io/inject: enabled +``` + +Then, add a Deployment resource and a HorizontalPodAutoscaler resource for the `podinfo` +application: + +```yaml title="test/deployment.yaml" +apiVersion: apps/v1 +kind: Deployment +metadata: + name: podinfo + labels: + app: podinfo +spec: + minReadySeconds: 5 + revisionHistoryLimit: 5 + progressDeadlineSeconds: 60 + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: podinfo + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9797" + labels: + app: podinfo + spec: + containers: + - name: podinfod + image: ghcr.io/stefanprodan/podinfo:6.1.8 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 9898 + protocol: TCP + - name: http-metrics + containerPort: 9797 + protocol: TCP + - name: grpc + containerPort: 9999 + protocol: TCP + command: + - ./podinfo + - --port=9898 + - --port-metrics=9797 + - --grpc-port=9999 + - --grpc-service-name=podinfo + - --level=info + - --random-delay=false + - --random-error=false + env: + - name: PODINFO_UI_COLOR + value: "#34577c" + livenessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/readyz + initialDelaySeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 2000m + memory: 512Mi + requests: + cpu: 100m + memory: 64Mi + +--- +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: podinfo +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: podinfo + minReplicas: 2 + maxReplicas: 4 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + # scale up if usage is above + # 99% of the requested CPU (100m) + 
averageUtilization: 99 +``` + +Then, add a Canary resource that references the Deployment and HorizontalPodAutoscaler resources, +note that we have not needed to define a service resource above, instead this is specified within +the Canary definition and created by Flagger: + +```yaml title="test/canary.yaml" +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + name: podinfo +spec: + # deployment reference + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: podinfo + # HPA reference (optional) + autoscalerRef: + apiVersion: autoscaling/v2beta2 + kind: HorizontalPodAutoscaler + name: podinfo + # the maximum time in seconds for the canary deployment + # to make progress before it is rollback (default 600s) + progressDeadlineSeconds: 60 + service: + # ClusterIP port number + port: 9898 + # container port number or name (optional) + targetPort: 9898 + analysis: + # schedule interval (default 60s) + interval: 30s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + # Linkerd Prometheus checks + metrics: + - name: request-success-rate + # minimum req success rate (non 5xx responses) + # percentage (0-100) + thresholdRange: + min: 99 + interval: 1m + - name: request-duration + # maximum req duration P99 + # milliseconds + thresholdRange: + max: 500 + interval: 30s + # testing (optional) + webhooks: + - name: acceptance-test + type: pre-rollout + url: http://loadtester.flagger/ + timeout: 30s + metadata: + type: bash + cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token" + - name: load-test + type: rollout + url: http://loadtester.flagger/ + metadata: + cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/" +``` + +Finally, add a Kustomization file to apply all resources to the `test` namespace: + +```yaml title="test/kustomization.yaml" +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: test +resources: +- namespace.yaml +- deployment.yaml +- canary.yaml +``` + +After a short time, the status of the canary object should be set to `Initialized`: + +![Canary rollout initialized](/img/pd-details-initialized.png) + +```bash +> kubectl get canary podinfo -n test +NAME STATUS WEIGHT LASTTRANSITIONTIME +podinfo Initialized 0 2022-07-22T12:37:58Z +``` + +Now trigger a new rollout by bumping the version of `podinfo`: + +```bash +> kubectl set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1 -n test +``` + +During the progressive rollout, the canary object reports on its current status: + + +![Canary rollout progressing](/img/pd-details-progressing.png) + +```bash +> kubectl get canary podinfo -n test +NAME STATUS WEIGHT LASTTRANSITIONTIME +podinfo Progressing 5 2022-07-22T12:41:57Z +``` + +After a short time the rollout is completed and the status of the canary object is set to +`Succeeded`: + +![Canary rollout succeeded](/img/pd-details-succeeded.png) + +```bash +> kubectl get canary podinfo -n test +NAME STATUS WEIGHT LASTTRANSITIONTIME +podinfo Succeeded 0 2022-07-22T12:47:58Z +``` + +## Summary + +Congratulations, you have now completed a progressive delivery rollout with Flagger and Linkerd +:tada: + +Next steps: +- Explore more of what [Flagger](https://flagger.app/) can offer +- Configure [manual approving](flagger-manual-gating.mdx) for progressive delivery deployments \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/guides/deploying-capa.mdx b/website/versioned_docs/version-0.12.0/guides/deploying-capa.mdx new file mode 100644 index 0000000000..9a989496bc --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/deploying-capa.mdx @@ -0,0 +1,72 @@ +--- +title: Deploying CAPA with EKS +hide_title: true +sidebar_position: 1 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import TierLabel from 
"../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Deploying CAPA with EKS + +## Creating your first CAPA Cluster + +:::note BEFORE YOU START + +Make sure the following software is installed before continuing with these instructions: + +- `github cli` >= 2.3.0 [(source)](https://cli.github.com/) +- `kubectl` [(source)](https://kubernetes.io/docs/tasks/tools/#kubectl) +- `eksctl` [(source)](https://github.com/weaveworks/eksctl/releases) +- `aws cli` [(source)](https://aws.amazon.com/cli/) +- `clusterclt` >= v1.0.1 [(source)](https://github.com/kubernetes-sigs/cluster-api/releases) +- `clusterawsadm` >= v1.1.0 [(source)](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases) + +The `AWS_ACCESS_KEY_ID`and `AWS_SECRET_ACCESS_KEY` of a user should be configured either via `aws configure` or exported in the current shell. +The `GITHUB_TOKEN` should be set as an environment variable in the current shell. It should have permissions to create Pull Requests against the cluster config repo. +::: + +If you've followed the [Installation guide](installation/weave-gitops-enterprise.mdx) you should have a management cluster ready to roll. + +### 1. Configure a capi provider + +See [Cluster API Providers](cluster-management/cluster-api-providers.mdx) page for more details on providers. He're we'll continue with `eks` and `capa` as an example. + +```bash +# Enable support for `ClusterResourceSet`s for automatically installing CNIs +export EXP_EKS=true +export EXP_MACHINE_POOL=true +export CAPA_EKS_IAM=true +export EXP_CLUSTER_RESOURCE_SET=true + +clusterctl init --infrastructure aws +``` + +### 2. Add a template + +See [CAPI Templates](gitops-templates/templates.mdx) page for more details on this topic. Once we load a template we can use it in the UI to create clusters! 
+ +import CapaTemplate from "!!raw-loader!./assets/templates/capa-template.yaml"; + +Download the template below to your config repository path, then commit and push to your git origin. + + + {() => ( + + curl -o clusters/management/capi/templates/capa-template.yaml{" "} + {window.location.protocol}//{window.location.host} + {require("./assets/templates/capa-template.yaml").default} + + )} + + + + {CapaTemplate} + diff --git a/website/versioned_docs/version-0.12.0/guides/displaying-custom-metadata.mdx b/website/versioned_docs/version-0.12.0/guides/displaying-custom-metadata.mdx new file mode 100644 index 0000000000..7f8a1b944b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/displaying-custom-metadata.mdx @@ -0,0 +1,59 @@ +--- +title: Displaying custom metadata +sidebar_position: 4 +--- +Weave Gitops lets you add annotations with custom metadata to your +flux automations and sources, and they will be displayed in the main UI. + +For example, you might use this to add links to dashboards, issue +system or another external system, or documentation and comments that +are visible straight in the main UI. + +We will use the podinfo application that we installed in the [getting +started guide](../getting-started.mdx) as an example. Open up the +podinfo kustomization and add annotations to it so it looks like this: + +```yaml title="./clusters/my-cluster/podinfo-kustomization.yaml" +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: podinfo + namespace: flux-system +// highlight-start + annotations: + metadata.weave.works/description: | + Podinfo is a tiny web application made with Go that showcases best practices of running microservices in Kubernetes. + Podinfo is used by CNCF projects like Flux and Flagger for end-to-end testing and workshops. 
+ metadata.weave.works/grafana-dashboard: https://grafana.my-org.example.com/d/podinfo-dashboard +// highlight-end +spec: + interval: 5m0s + path: ./kustomize + prune: true + sourceRef: + kind: GitRepository + name: podinfo + targetNamespace: flux-system +``` + +When you open the gitops dashboard and navigate to the Kustomization +details, you should see the following: + +![Application detail view showing custom metadata](/img/metadata-display.png) + +There are some restrictions to keep in mind: + + * The annotation key has to start with the domain + `metadata.weave.works`. Any other annotations will be ignored. + * The key that will be displayed is whatever you put after the + domain, title cased, and with dashes replaced with spaces. Above, + `metadata.weave.works/grafana-dashboard` was displayed as "Grafana Dashboard". + * The value can either be a link, or can be plain text. Newlines in + plain text will be respected. + * The key is subject to certain limitations that kubernetes impose on + annotations - it must be shorter than 63 characters (not including + the domain), and must be an English alphanumeric character, or one of + `-._`. See the [kubernetes + documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set) + for the full list of restrictions. diff --git a/website/versioned_docs/version-0.12.0/guides/flagger-manual-gating.mdx b/website/versioned_docs/version-0.12.0/guides/flagger-manual-gating.mdx new file mode 100644 index 0000000000..26960a7d17 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/flagger-manual-gating.mdx @@ -0,0 +1,147 @@ +--- +title: Manual approval for progressive delivery deployments +sidebar_position: 7 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + +

+ {frontMatter.title} +

+ +Weave GitOps Enterprise helps you understand the state of progressive delivery +updates to your applications with [Flagger](https://flagger.app). The Delivery +view shows all your deployed `Canary` objects and the status for how a rollout +is progressing. + +By default, Flagger will automatically promote a new version of an application +should it pass the defined checks during an analysis phase. However, you can +also configure [webhooks](https://docs.flagger.app/usage/webhooks) to enable +manual approvals for Flagger to proceed to the next phase of a rollout. + +In this guide we will show you how to get started with manually gating a +progressive delivery promotion with Flagger, using the in-built load tester as +a way to demonstrate and learn the capability so that you could configure +your own gates. + +## Pre-requisites +- Basic knowledge of [Flagger](https://flagger.app). +- An existing `Canary` object and target deployment. +- Flagger's load tester [installed](https://docs.flagger.app/usage/webhooks#load-testing) + +## Basic introduction to Webhooks and Gating +Flagger can be configured to work with several types of hooks which will be called at +given stages during a progressive delivery rollout. Some of these allow you to manually +gate whether a rollout proceeds at certain points: +- Before a new deployment is scaled up and canary analysis begins with `confirm-rollout`. +- Before traffic weight is increased with `confirm-traffic-increase`. +- Before a new version is promoted following successful canary analysis with `confirm-promotion`. + +Any URL can be used as a webhook target, it will approve if it returns with a +`200 OK` status code, and halt if it's `403 Forbidden`. 
+ +The webhook will receive a JSON payload that can be unmarshaled as +`CanaryWebhookPayload`: + +```go +type CanaryWebhookPayload struct { + // Name of the canary + Name string `json:"name"` + + // Namespace of the canary + Namespace string `json:"namespace"` + + // Phase of the canary analysis + Phase CanaryPhase `json:"phase"` + + // Metadata (key-value pairs) for this webhook + Metadata map[string]string `json:"metadata,omitempty"` +} +``` + +For more information on Webhooks in Flagger, see the +[Flagger documentation](https://docs.flagger.app/usage/webhooks) + + +## Using Flagger's load tester to manually gate a promotion +To enable manual approval of a promotion we are going to configure the +`confirm-promotion` webhook to call a particular gate provided through +Flagger's included load tester. This is an easy way to experiment with +the capability using Flagger's included components. + +**Important note** +We strongly recommend that you DO NOT USE the load tester for manual gating +in a production environment. There is no auth on the load tester, so +anyone with access to the cluster would be able to open and close; and +the load tester has no storage, so if restarted - all gates would close. + +Instead, configure these webhooks for appropriate integration with a +tool of your choice such Jira, Slack, Jenkins, etc. + +### Configure the confirm-promotion webhook +In your Canary object, add the following in the `analysis` section: + +```yaml + analysis: + webhooks: + - name: "ask for confirmation" + type: confirm-promotion + url: http://flagger-loadtester.test/gate/check +``` + +This gate is closed by default. + +### Deploy a new version of your application +Trigger a Canary rollout by updating your target deployment/daemonset, for +example by bumping the container image tag. A full list of ways to trigger +a rollout is available +[here](https://docs.flagger.app/faq#how-to-retry-a-failed-release). 
+ +You can watch the progression of a Canary in Weave GitOps Enterprise (WGE) +through the Applications > Delivery view: + +![Podinfo Canary progressing](/img/pd-table-progressing.png) + + +### Wait for the Canary analysis to complete +Once the Canary analysis has successfully completed, Flagger will call the +`confirm-promotion` webhook and change status to `WaitingPromotion` as you +can see in the screenshots below: + +![Podinfo Canary showing Waiting Promotion - table view](/img/pd-table-waiting.png) + +![Podinfo Canary showing Waiting Promotion - details view](/img/pd-details-waiting.png) + +### Open the gate +To open the gate and therefore confirm that you are happy for the new +version of your application to be promoted, we can exec into the load tester +container: + +``` +$ kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh + +# to open +> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/open +``` + +Flagger will now proceed to promote the Canary version to the primary and +complete the progressive delivery rollout :tada: + +![Podinfo Canary succeeded - full events history](/img/pd-events-gate-passed.png) + +![Podinfo Canary succeeded - promoting](/img/pd-table-promoting.png) + +![Podinfo Canary succeeded - promoted](/img/pd-table-succeeded.png) + + +To manually close the gate again you can issue: + +``` +> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/close +``` + +**References:** + +* This guide was informed by the +[Official Flagger documentation](https://docs.flagger.app/usage/webhooks#manual-gating) diff --git a/website/versioned_docs/version-0.12.0/guides/setting-up-dex.md b/website/versioned_docs/version-0.12.0/guides/setting-up-dex.md new file mode 100644 index 0000000000..9692b15dcc --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/setting-up-dex.md @@ -0,0 +1,278 @@ +--- +title: Configuring OIDC with Dex and GitHub +sidebar_position: 3 +--- + +In this guide we will show you how 
to enable users to login to the Weave GitOps dashboard by authenticating with their GitHub account. + +This example uses [Dex][tool-dex] and its GitHub connector, and assumes Weave GitOps has already been installed on a Kubernetes clusters. + +### Pre-requisites + +- A Kubernetes cluster such as [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) cluster running a +[Flux-supported version of Kubernetes](https://fluxcd.io/docs/installation/#prerequisites) +- Weave GitOps is [installed](../installation/index.mdx) and [TLS has been enabled](../configuration/tls.md). + +## What is Dex? + +[Dex][tool-dex] is an identity service that uses [OpenID Connect][oidc] to +drive authentication for other apps. + +Alternative solutions for identity and access management exist such as [Keycloak](https://www.keycloak.org/). + +[tool-dex]: https://dexidp.io/ +[oidc]: https://openid.net/connect/ + +## Create Dex namespace + +Create a namespace where Dex will be installed: + +```yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dex +``` + +## Add credentials + +There are a [lot of options][dex-connectors] available with Dex, in this guide we will +use the [GitHub connector][dex-github]. + +We can get a GitHub ClientID and Client secret by creating a +[new OAuth application][github-oauth]. + +![GitHub OAuth configuration](/img/guides/setting-up-dex/github-oauth-application.png) + +```bash +kubectl create secret generic github-client \ + --namespace=dex \ + --from-literal=client-id=${GITHUB_CLIENT_ID} \ + --from-literal=client-secret=${GITHUB_CLIENT_SECRET} +``` + +[dex-connectors]: https://dexidp.io/docs/connectors/ +[dex-github]: https://dexidp.io/docs/connectors/github/ +[github-oauth]: https://docs.github.com/en/developers/apps/building-oauth-apps/creating-an-oauth-app + +## Deploy Dex + +As we did before, we can use `HelmRepository` and `HelmRelease` objects to let +Flux deploy everything. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: dex + namespace: dex +spec: + interval: 1m + url: https://charts.dexidp.io +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: dex + namespace: dex +spec: + interval: 5m + chart: + spec: + chart: dex + version: 0.6.5 + sourceRef: + kind: HelmRepository + name: dex + namespace: dex + interval: 1m + values: + image: + tag: v2.31.0 + envVars: + - name: GITHUB_CLIENT_ID + valueFrom: + secretKeyRef: + name: github-client + key: client-id + - name: GITHUB_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: github-client + key: client-secret + config: + # Set it to a valid URL + issuer: https://dex.dev.example.tld + + # See https://dexidp.io/docs/storage/ for more options + storage: + type: memory + + staticClients: + - name: 'Weave GitOps Core' + id: weave-gitops + secret: AiAImuXKhoI5ApvKWF988txjZ+6rG3S7o6X5En + redirectURIs: + - 'https://localhost:9001/oauth2/callback' + - 'https://0.0.0.0:9001/oauth2/callback' + - 'http://0.0.0.0:9001/oauth2/callback' + - 'http://localhost:4567/oauth2/callback' + - 'https://localhost:4567/oauth2/callback' + - 'http://localhost:3000/oauth2/callback' + + connectors: + - type: github + id: github + name: GitHub + config: + clientID: $GITHUB_CLIENT_ID + clientSecret: $GITHUB_CLIENT_SECRET + redirectURI: https://dex.dev.example.tld/callback + orgs: + - name: weaveworks + teams: + - team-a + - team-b + - QA + - name: ww-test-org + ingress: + enabled: true + className: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + hosts: + - host: dex.dev.example.tld + paths: + - path: / + pathType: ImplementationSpecific + tls: + - hosts: + - dex.dev.example.tld + secretName: dex-dev-example-tld +``` + +:::note SSL certificate without cert manager +If we don't want to use cert manager, we can remove the related annotation and +use our predefined secret in the `tls` section. 
+::: + +An important part of the configuration is the `orgs` field on the GitHub +connector. + +```yaml +orgs: +- name: weaveworks + teams: + - team-a + - team-b + - QA +``` + +Here we can define groups under a GitHub organisation. In this example the +GitHub organisation is `weaveworks` and all members of the `team-a`, +`team-b`, and `QA` teams can authenticate. Group membership will be added to +the user. + +Based on these groups, we can bind roles to groups: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: wego-test-user-read-resources + namespace: flux-system +subjects: + - kind: Group + name: weaveworks:QA + namespace: flux-system +roleRef: + kind: Role + name: wego-admin-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: wego-admin-role + namespace: flux-system +rules: + - apiGroups: [""] + resources: ["secrets", "pods" ] + verbs: [ "get", "list" ] + - apiGroups: ["apps"] + resources: [ "deployments", "replicasets"] + verbs: [ "get", "list" ] + - apiGroups: ["kustomize.toolkit.fluxcd.io"] + resources: [ "kustomizations" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["helm.toolkit.fluxcd.io"] + resources: [ "helmreleases" ] + verbs: [ "get", "list", "patch" ] + - apiGroups: ["source.toolkit.fluxcd.io"] + resources: ["buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories"] + verbs: ["get", "list", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] +``` + +The same way we can bind cluster roles to a group: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: weaveworks:team-a +subjects: +- kind: Group + name: weaveworks:team-a + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +``` + +### Set up static user + +For static user, add `staticPasswords` to the 
`config`: + +```yaml +spec: + values: + config: + staticPasswords: + - email: "admin@example.tld" + hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" + username: "admin" + userID: "08a8684b-db88-4b73-90a9-3cd1661f5466" +``` + +Static user password can be generated with `htpasswd`: + +```bash +echo password | htpasswd -BinC 10 admin | cut -d: -f2 +``` + +## OIDC login + +Using the "Login with OIDC Provider" button: + +![Login page](/img/guides/setting-up-dex/oidc-login.png) + +We have to authorize the GitHub OAuth application: + +![GitHub OAuth page](/img/guides/setting-up-dex/github-auth.png) + +After that, grant access to Dex: + +![Dex grant access](/img/guides/setting-up-dex/dex-auth.png) + +Now we are logged in with our GitHub user and we can see all resources we have +access to: + +![UI logged in](/img/guides/setting-up-dex/ui-logged-in.png) diff --git a/website/versioned_docs/version-0.12.0/guides/using-terraform-templates.mdx b/website/versioned_docs/version-0.12.0/guides/using-terraform-templates.mdx new file mode 100644 index 0000000000..e5ad0032ed --- /dev/null +++ b/website/versioned_docs/version-0.12.0/guides/using-terraform-templates.mdx @@ -0,0 +1,314 @@ +--- +title: Using Terraform templates +sidebar_position: 5 +--- + +import TierLabel from "../_components/TierLabel"; + +# Using Terraform templates + +This guide will show you how to use a template to create a Terraform resource in Weave GitOps Enterprise. + +## CLI guide + +### Pre-requisites +- Install [Weave GitOps Enterprise](installation/weave-gitops-enterprise.mdx) with [TF-Controller installed](installation/weave-gitops-enterprise.mdx#optional-install-the-tf-controller) and [TLS enabled](../configuration/tls.md). + +### 1. Add a template to your cluster + +Add the following template to a path in your Git repository that is synced by Flux. 
For example, in the [Installation guide](installation/weave-gitops-enterprise.mdx#install-flux-onto-your-cluster-with-the-flux-bootstrap-command), we set the path that is synced by Flux to `./clusters/management`. + +Commit and push these changes. Once a template is available in the cluster, it can be used to create a resource, which will be shown in the next step. + +```yaml title="./clusters/management/tf-template.yaml" +--- +apiVersion: clustertemplates.weave.works/v1alpha1 +kind: GitOpsTemplate +metadata: + name: tf-template + namespace: default +spec: + description: + This is a sample WGE template that will be translated into a tf-controller specific template. + params: + - name: RESOURCE_NAME + description: Resource Name + resourcetemplates: + - apiVersion: infra.contrib.fluxcd.io/v1alpha1 + kind: Terraform + metadata: + name: ${RESOURCE_NAME} + namespace: flux-system + spec: + interval: 1h + path: ./ + approvePlan: auto + alwaysCleanupRunnerPod: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system +``` + +Verify that your template is in the cluster: +```bash +kubectl get gitopstemplates.clustertemplates.weave.works -A +NAME AGE +sample-wge-tf-controller-template 14m +``` + +If the template does not appear immediately, reconcile the changes with Flux: +```bash +flux reconcile kustomization flux-system +► annotating Kustomization flux-system in flux-system namespace +✔ Kustomization annotated +◎ waiting for Kustomization reconciliation +✔ applied revision main/e6f5f0c3925bcfecdb50bceb12af9a87677d2213 +``` + +### 2. Use the template to create a resource +A resource can be created from a template by specifying the template's name and supplying values to it, as well as your Weave GitOps Enterprise username, password, and HTTP API endpoint. 
+```bash +gitops add terraform --from-template sample-wge-tf-controller-template \ +--set="RESOURCE_NAME"="name" \ +--username= --password= \ +--endpoint https://localhost:8000 \ +--url https://github.com/myawesomeorg/myawesomerepo + +Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/5 +``` + +This will create a PR in your Git repository with a TF-Controller manifest. Once the PR is merged, TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest! + +This template can be used to create multiple resources out of the same Terraform manifest by supplying different values to the template. Any changes to the Terraform manifest will be reconciled automatically to all resources. + +### 3. List available templates +Get a specific template that can be used to create a Terraform resource: +```bash +gitops get template terraform sample-wge-tf-controller-template --endpoint https://localhost:8000 --username= --password= +NAME PROVIDER DESCRIPTION ERROR +sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template. +``` + +List all the templates available on the cluster: +```bash +gitops get template terraform --endpoint https://localhost:8000 --username= --password= +NAME PROVIDER DESCRIPTION ERROR +sample-aurora-tf-template This is a sample Aurora RDS template. +sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template. +``` + +### 4. 
List the parameters of a template +List all the parameters that can be defined on a specific template: +```bash +gitops get template terraform tf-controller-aurora --list-parameters --endpoint https://localhost:8000 --username= --password= +NAME REQUIRED DESCRIPTION OPTIONS +RESOURCE_NAME false Resource Name +``` + +## Use Case: Create an Aurora RDS with WGE +:::tip BONUS + +For a more advanced example, here is a template to create an Aurora RDS cluster using WGE with Flux and the TF-Controller. +::: + +### Pre-requisites +- Everything from the [previous section](#pre-requisites) +- Get (or create) an AWS Access Key ID and Secret Access Key. Check the [AWS docs](https://docs.aws.amazon.com/powershell/latest/userguide/pstools-appendix-sign-up.html) for details on how to do this. +- Create an AWS IAM Role for the Terraform AWS Provider. Its policy should include `iam:CreateRole`. More info [here](https://support.hashicorp.com/hc/en-us/articles/360041289933-Using-AWS-AssumeRole-with-the-AWS-Terraform-Provider). + +### 1. Configure a way to manage secrets + +Configure a way to safely store Secrets. One method is to use the Mozilla SOPS CLI, but there are other ways, such as Sealed Secrets or Vaults. + +Follow the steps in the [Flux docs](https://fluxcd.io/docs/guides/mozilla-sops/) **except** for the "Configure in-cluster secrets decryption" step! This step looks slightly different for WGE. Instead of re-creating the controllers, you can configure the `kustomize-controller` as instructed below. + +In your Git repository source, add the following to your `kustomize-controller` configuration: +```bash +cat <> ./clusters//flux-system/gotk-sync.yaml + decryption: + provider: sops + secretRef: + name: sops-gpg +EOF +``` + +### 2. 
Encrypt and store your credentials in your Git repository +Create a Secret to store sensitive values such as the following: +- DB username +- DB password +- AWS Access Key ID +- AWS Secret Access Key +- AWS Role ARN + +:::note +If following the Flux guide, this step corresponds to ["Encrypting secrets using OpenPGP"](https://fluxcd.io/docs/guides/mozilla-sops/#encrypting-secrets-using-openpgp). You can stop following the Flux guide at this step. +::: + +For example, here is what you would do if using the SOPS method: +```bash +kubectl -n flux-system create secret generic tf-controller-auth \ +--from-literal=master_username=admin \ +--from-literal=master_password=change-me \ +--from-literal=aws_access_key=AKIAIOSFODNN7EXAMPLE \ +--from-literal=aws_secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ +--from-literal=aws_role_arn="arn:aws:iam::012345678910:role/wge-tf-controller-example" \ +--dry-run=client \ +-o yaml > tf-controller-auth.yaml +``` + +Then, encrypt the secret: +```bash +sops --encrypt --in-place tf-controller-auth.yaml +``` + +Commit and push your changes. You can now store encrypted secrets in your Git repository. + +### 4. Add the manifests to your cluster + +Add the following Terraform manifest to the root of your Git repository. 
+ +```yaml title="./rds.tf" +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 3.0" + } + } +} + +variable "cluster_identifier" {} +variable "database_name" {} +variable "master_username" {} +variable "master_password" {} +variable "backup_retention_period" {} +variable "region" {} +variable "aws_access_key" {} +variable "aws_secret_key" {} +variable "aws_role_arn" {} + +provider "aws" { + region = var.region + access_key = var.aws_access_key + secret_key = var.aws_secret_key + + assume_role { + role_arn = var.aws_role_arn + } +} + +locals { + engine = "aurora-mysql" + engine_version = "5.7.mysql_aurora.2.07.5" + port = 3306 +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "group-name" + values = [var.region] + } +} + +resource "aws_rds_cluster" "mycluster" { + cluster_identifier = var.cluster_identifier + engine = local.engine + engine_version = local.engine_version + port = local.port + availability_zones = slice(data.aws_availability_zones.available.names, 0, 3) + database_name = var.database_name + master_username = var.master_username + master_password = var.master_password + backup_retention_period = var.backup_retention_period + skip_final_snapshot = true + apply_immediately = true +} + +resource "aws_rds_cluster_instance" "cluster_instance" { + count = 1 + identifier = "${aws_rds_cluster.mycluster.id}-${count.index}" + cluster_identifier = aws_rds_cluster.mycluster.id + instance_class = "db.t3.small" + engine = aws_rds_cluster.mycluster.engine + engine_version = aws_rds_cluster.mycluster.engine_version +} +``` + +Add the following template to a path in your Git repository that is synced by Flux. In the [quickstart guide](installation/weave-gitops-enterprise.mdx#install-flux-onto-your-cluster-with-the-flux-bootstrap-command), we set this path to `./clusters/management`. 
+ +```yaml title="./clusters/management/rds-template.yaml" +--- +apiVersion: clustertemplates.weave.works/v1alpha1 +kind: GitOpsTemplate +metadata: + name: rds-template + namespace: default +spec: + description: This is a sample Aurora RDS template. + params: + - name: RESOURCE_NAME + description: Resource Name + - name: CLUSTER_IDENTIFIER + description: Cluster Identifier + - name: DATABASE_NAME + description: Database Name + - name: BACKUP_RETENTION_PERIOD + description: Backup Retention Period + - name: REGION + description: Region + resourcetemplates: + - apiVersion: infra.contrib.fluxcd.io/v1alpha1 + kind: Terraform + metadata: + name: ${RESOURCE_NAME} + namespace: flux-system + spec: + interval: 1h + path: ./ + approvePlan: auto + alwaysCleanupRunnerPod: true + vars: + - name: cluster_identifier + value: ${CLUSTER_IDENTIFIER} + - name: database_name + value: ${DATABASE_NAME} + - name: backup_retention_period + value: ${BACKUP_RETENTION_PERIOD} + - name: region + value: ${REGION} + varsFrom: + - kind: Secret + name: tf-controller-auth + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system +``` + +Commit and push your changes. + +:::tip +You can change the location where you keep your Terraform manifests in your Git source (which the TF-Controller will reconcile) by configuring `spec.resourcetemplates.spec.path`. +::: + +### 5. Use the template to create the RDS +```bash +gitops add terraform --from-template rds-template \ +--username= --password= \ +--endpoint https://localhost:8000 \ +--url https://github.com/myawesomeorg/myawesomerepo \ +--set "RESOURCE_NAME"="tf-controller-aurora","CLUSTER_IDENTIFIER"="super-awesome-aurora","DATABASE_NAME"="db1","BACKUP_RETENTION_PERIOD"=5,"REGION"="us-west-2" + +Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/6 +``` + +Merge the PR in your Git repository to add the TF-Controller manifest. 
TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest. + +Any changes to your Terraform manifest will be automatically reconciled by the TF-controller with Flux. + +You can re-use this template to create multiple Terraform resources, each with a different set of values! + +Make sure to delete the newly created RDS resources to not incur additional costs. diff --git a/website/versioned_docs/version-0.12.0/help-and-support.md b/website/versioned_docs/version-0.12.0/help-and-support.md new file mode 100644 index 0000000000..7a2c6668e2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/help-and-support.md @@ -0,0 +1,36 @@ +--- +title: Help and Support +sidebar_position: 7 +--- + +## Community + +👋 Come talk to us and other users in the [#weave-gitops channel](https://app.slack.com/client/T2NDH1D9D/C0248LVC719/thread/C2ND76PAA-1621532937.019800) on Weaveworks Community Slack. + +[Invite yourself](https://slack.weave.works/) if you haven't joined yet. + +### Flux + +The Flux project has a fantastic community to help support your GitOps journey, find more details on how to reach out via their [community page](https://fluxcd.io/docs/#community) + +## Commercial Support + +Weaveworks provides [Weave GitOps Enterprise](https://www.weave.works/product/gitops-enterprise/), a continuous operations product that makes it easy to deploy and manage Kubernetes clusters and applications at scale in any environment. The single management console automates trusted application delivery and secure infrastructure operations on premise, in the cloud and at the edge. + +To discuss your support needs, please contact us at [sales@weave.works](mailto:sales@weave.works). + +## Recommended resources + +Got a suggestion for this list? Please open a pull request using the "Edit this page" link at the bottom. 
+ +### Weaveworks materials + +- [GitOps for absolute beginners](https://go.weave.works/WebContent-EB-GitOps-for-Beginners.html) - eBook from Weaveworks +- [Guide to GitOps](https://www.weave.works/technologies/gitops/) - from Weaveworks +- [Awesome GitOps](https://github.com/weaveworks/awesome-gitops) - inspired by [https://github.com/sindresorhus/awesome](https://github.com/sindresorhus/awesome) + +### Other + +- [Flux docs](https://fluxcd.io/docs) - comprehensive documentation on Flux +- [OpenGitOps](https://opengitops.dev/) - CNCF Sandbox project aiming to define a vendor-neutral, principle-led meaning of GitOps. +- [gitops.tech](https://www.gitops.tech/) - supported by Innoq diff --git a/website/versioned_docs/version-0.12.0/installation/_category_.json b/website/versioned_docs/version-0.12.0/installation/_category_.json new file mode 100644 index 0000000000..eab47a2fc3 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/installation/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Installation", + "position": 1 +} diff --git a/website/versioned_docs/version-0.12.0/installation/aws-marketplace.mdx b/website/versioned_docs/version-0.12.0/installation/aws-marketplace.mdx new file mode 100644 index 0000000000..79d3821893 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/installation/aws-marketplace.mdx @@ -0,0 +1,185 @@ +--- +title: AWS Marketplace +sidebar_position: 4 +hide_title: true +pagination_next: "getting-started" +--- + +## AWS Marketplace +Weave GitOps is also available via the AWS Marketplace. + +The following steps will allow you to deploy the Weave GitOps product to an EKS cluster via a Helm Chart. + +These instructions presume you already have installed [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), +[`eksctl`](https://github.com/weaveworks/eksctl), [`helm`](https://github.com/helm/helm) and +the [Helm S3 Plugin](https://github.com/hypnoglow/helm-s3). 
+ +### Step 1: Subscribe to Weave GitOps on the AWS Marketplace + +To deploy the managed Weave GitOps solution, first subscribe to the product on [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-vkn2wejad2ix4). +**This subscription is only available for deployment on EKS versions 1.17-1.21.** + +_Note: it may take ~20 minutes for your Subscription to become live and deployable._ + +### [Optional] Step 2: Create an EKS cluster + +**If you already have an EKS cluster, you can skip ahead to Step 3.** + +If you do not have a cluster on EKS, you can use [`eksctl`](https://github.com/weaveworks/eksctl) to create one. + +Copy the contents of the sample file below into `cluster-config.yaml` and replace the placeholder values with your settings. +See the [`eksctl` documentation](https://eksctl.io/) for more configuration options. + +```yaml +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: CLUSTER_NAME # Change this + region: REGION # Change this + +# This section is required +iam: + withOIDC: true + serviceAccounts: + - metadata: + name: wego-service-account # Altering this will require a corresponding change in a later command + namespace: flux-system + roleOnly: true + attachPolicy: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "aws-marketplace:RegisterUsage" + Resource: '*' + +# This section will create a single Managed nodegroup with one node. +# Edit or remove as desired. +managedNodeGroups: +- name: ng1 + instanceType: m5.large + desiredCapacity: 1 +``` + +Create the cluster: + +```bash +eksctl create cluster -f cluster-config.yaml +``` + +### [Optional] Step 3: Update your EKS cluster + +**If you created your cluster using the configuration file in Step 2, your cluster is +already configured correctly and you can skip ahead to Step 4.** + +In order to use the Weave GitOps container product, +your cluster must be configured to run containers with the correct IAM Policies. 
+ +The recommended way to do this is via [IRSA](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/). + +Use this `eksctl` configuration below (replacing the placeholder values) to: +- Associate an OIDC provider +- Create the required service account ARN + +Save the example below as `oidc-config.yaml` + +```yaml +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: CLUSTER_NAME # Change this + region: REGION # Change this + +# This section is required +iam: + withOIDC: true + serviceAccounts: + - metadata: + name: wego-service-account # Altering this will require a corresponding change in a later command + namespace: flux-system + roleOnly: true + attachPolicy: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "aws-marketplace:RegisterUsage" + Resource: '*' + +``` + +```bash +eksctl utils associate-iam-oidc-provider -f oidc-config.yaml --approve +eksctl create iamserviceaccount -f oidc-config.yaml --approve +``` + +### Step 4: Fetch the Service Account Role ARN +First retrieve the ARN of the IAM role which you created for the `wego-service-account`: + +```bash +# replace the placeholder values with your configuration +# if you changed the service account name from wego-service-account, update that in the command +export SA_ARN=$(eksctl get iamserviceaccount --cluster --region | awk '/wego-service-account/ {print $3}') + +echo $SA_ARN +# should return +# arn:aws:iam:::role/eksctl--addon-iamserviceaccount-xxx-Role1-1N41MLVQEWUOF +``` + +_This value will also be discoverable in your IAM console, and in the Outputs of the Cloud Formation +template which created it._ + +### Step 5: Install Weave GitOps + +Copy the Chart URL from the Usage Instructions in AWS Marketplace, or download the file from the Deployment template to your workstation. + +To be able to log in to your new installation, you need to set up authentication. 
Create a new file `values.yaml` where you set your username, and a bcrypt hash of your desired password, like so: + +```yaml title="./values.yaml" +gitops: + adminUser: + create: true + username: + passwordHash: +``` + +Then install it: + +```console +helm install wego \ + --namespace=flux-system \ + --create-namespace \ + --set serviceAccountRole="$SA_ARN" \ + --values ./values.yaml + +# if you changed the name of the service account +helm install wego \ + --namespace=flux-system \ + --create-namespace \ + --set serviceAccountName='' \ + --set serviceAccountRole="$SA_ARN" \ + --values ./values.yaml +``` + +### Step 6: Check your installation + +Run the following from your workstation: + +```console +kubectl get pods -n flux-system +# you should see something like the following returned +flux-system helm-controller-5b96d94c7f-tds9n 1/1 Running 0 53s +flux-system kustomize-controller-8467b8b884-x2cpd 1/1 Running 0 53s +flux-system notification-controller-55f94bc746-ggmwc 1/1 Running 0 53s +flux-system source-controller-78bfb8576-stnr5 1/1 Running 0 53s +flux-system wego-metering-f7jqp 1/1 Running 0 53s +flux-system ww-gitops-weave-gitops-5bdc9f7744-vkh65 1/1 Running 0 53s +``` + +Your Weave GitOps installation is now ready! + +## Next steps + +In our following [Get Started document](../getting-started.mdx), we will walk you through logging into the GitOps Dashboard and deploying an application. diff --git a/website/versioned_docs/version-0.12.0/installation/index.mdx b/website/versioned_docs/version-0.12.0/installation/index.mdx new file mode 100644 index 0000000000..67c86e3c72 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/installation/index.mdx @@ -0,0 +1,34 @@ +--- +title: Installation +sidebar_position: 1 +hide_title: true +--- + +## Installing Weave GitOps + +### Pre-requisites + +#### Kubernetes Cluster + +No matter which version of Weave GitOps you install, having a Kubernetes cluster up +and running is required. 
This version of Weave GitOps is tested against the following +[Kubernetes releases](https://kubernetes.io/releases/): + +| Kubernetes Release | End of Life | +| ------------------ | ----------- | +| 1.25 | 2023-10-27 | +| 1.24 | 2023-07-28 | +| 1.23 | 2023-02-28 | +| 1.22 | 2022-10-28 | + +Note that the version of [Flux](https://fluxcd.io/docs/installation/#prerequisites) that you use might impose further minimum version requirements. + +### Installing Weave GitOps itself + +Depending on your setup and requirement you have the following choice + +| Installation | +| ------------------------------------------------------ | +| [Weave GitOps](weave-gitops.mdx) | +| [Weave GitOps Enterprise](weave-gitops-enterprise.mdx) | +| [AWS Marketplace](aws-marketplace.mdx) | diff --git a/website/versioned_docs/version-0.12.0/installation/weave-gitops-enterprise.mdx b/website/versioned_docs/version-0.12.0/installation/weave-gitops-enterprise.mdx new file mode 100644 index 0000000000..b248e89725 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/installation/weave-gitops-enterprise.mdx @@ -0,0 +1,370 @@ +--- +title: Weave GitOps Enterprise +sidebar_position: 3 +hide_title: true +pagination_next: "getting-started" +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; +import TierLabel from "../_components/TierLabel"; +import CurlCodeBlock from "../_components/CurlCodeBlock"; + +## Installing Weave GitOps Enterprise + +Weave GitOps Enterprise (WGE) provides ops teams with an easy way to assess the +health of multiple clusters in a single place. It shows cluster information such as +Kubernetes version and number of nodes and provides details about the GitOps operations +on those clusters, such as Git repositories and recent commits. Additionally, it +aggregates Prometheus alerts to assist with troubleshooting. 
+ +To purchase entitlement to Weave GitOps Enterprise please contact [sales@weave.works](mailto:sales@weave.works) + +:::note +There is no need to install Weave GitOps (OSS) before installing Weave GitOps Enterprise +::: + +To install Weave GitOps Enterprise + +import TOCInline from "@theme/TOCInline"; + + { + const trimStart = toc.slice(toc.findIndex((node) => node.id == 'installing-weave-gitops-enterprise')+1); + return trimStart.slice(0, trimStart.findIndex((node) => node.level == '2')); + })()} /> + +### 1. Set up a Management Cluster with `flux` + +To get you started in this document we'll cover: + +- `kind` as our management cluster with the _CAPD_ provider +- **EKS** as our management cluster with the _CAPA_ provider + +However Weave GitOps Enterprise supports any combination of management cluster and CAPI provider. + + + + +##### 1.1 We start with creating a kind-config. + +```yaml title="kind-config.yaml" +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + extraMounts: + - hostPath: /var/run/docker.sock + containerPath: /var/run/docker.sock +``` + +The `extraMounts` are for the Docker CAPI provider (CAPD) to be able to talk to the host docker + +##### 1.2 Start your kind cluster using the configuration above and Kubernetes v1.23.6 + +```bash +kind create cluster --config kind-config.yaml --image=kindest/node:v1.23.6 +``` + + + + +##### 1.1 Prepare IAM for installation + +The Cluster API needs special permissions in AWS. Use the `clusterawsadm` command below to roll out a CloudStack to installs the permissions into your AWS account. While the CloudStack is bound to a region, the resulting permissions are globally scoped. You can use any AWS Region that you have access to. The `clusterawsadm` command takes an AWSIAMConfiguration file. 
We have provided a working example for you : + +```yaml title="eks-config.yaml" +apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1beta1 +kind: AWSIAMConfiguration +spec: + bootstrapUser: + enable: true + eks: + iamRoleCreation: false # Set to true if you plan to use the EKSEnableIAM feature flag to enable automatic creation of IAM roles + defaultControlPlaneRole: + disable: false # Set to false to enable creation of the default control plane role + managedMachinePool: + disable: false # Set to false to enable creation of the default node pool role +``` + +Run `clusterawsadm` command to create the IAM group. + +```bash +$ clusterawsadm bootstrap iam create-cloudformation-stack --config eks-config.yaml --region $REGION +``` + +Create an IAM User. This user will be used as a kind of service account. Assign the newly created group to this user. The group name will be something like: `cluster-api-provider-aws-s-AWSIAMGroupBootstrapper-XXXX`. Create a secret for the newly created IAM user. + +##### 1.2 Create the cluster + +In testing we used the following values +`$INSTANCESIZE` : t3.large +`$NUMOFNODES` : 2 +`$MINNODES` : 2 +`$MAXNODES` : 6 + +```bash +eksctl create cluster -n "$CLUSTERNAME" -r "$REGION" --nodegroup-name workers -t $INSTANCESIZE --nodes $NUMOFNODES --nodes-min $MINNODES --nodes-max $MAXNODES --ssh-access --alb-ingress-access +``` + +##### 1.3 Add cluster to kubeconfig + +Once the cluster is created, add the cluster to your `kubeconfig` + +```bash +aws eks --region "$REGION" update-kubeconfig --name "$CLUSTERNAME" +``` + + + + +##### Install Flux onto your cluster with the `flux bootstrap` command. 
+ + + + +```bash +flux bootstrap github \ + --owner= \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/management \ + --personal +``` + + + + + +```bash +flux bootstrap gitlab \ + --owner= \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/management \ + --personal +``` + + + + + + +* **owner** - The username (or organization) of the git repository +* **repository** - Git repository name +* **branch** - Git branch (default "main") +* **path** - path relative to the repository root, when specified the cluster sync will be scoped to this path +* **personal** - if set, the owner is assumed to be a repo user + +More information about `flux` and the `flux bootstrap` command can be found [here](https://fluxcd.io/docs/cmd/) + +:::note At this point a few things have occurred: +* Your Flux management cluster is now running +* A new git repo was created based on the parameters you set in the `flux bootstrap` command. Take a look at your repositories. +::: + +### 2. Install a CAPI provider + +:::note `clusterctl` versions + +The example templates provided in this guide have been tested with `clusterctl` version `1.1.3`. However you might need to use an older or newer version depending on the capi-providers you plan on using. + +Download a specific version of clusterctl from the [releases page](https://github.com/kubernetes-sigs/cluster-api/releases). +::: + +In order to be able to provision Kubernetes clusters, a CAPI provider needs to be installed. See [Cluster API Providers](../cluster-management/cluster-api-providers.mdx) page for more details on providers. +Here we'll continue with our example instructions for CAPD and CAPA. 
+ + + + +```bash +# Enable support for `ClusterResourceSet`s for automatically installing CNIs +export EXP_CLUSTER_RESOURCE_SET=true + +clusterctl init --infrastructure docker +``` + + + + +```bash +export EXP_EKS=true +export EXP_MACHINE_POOL=true +export CAPA_EKS_IAM=true +export EXP_CLUSTER_RESOURCE_SET=true + +clusterctl init --infrastructure aws +``` + + + + +### 3. Apply the entitlements secret + +Contact sales@weave.works for a valid entitlements secret. Then apply it to the cluster: + +```bash +kubectl apply -f entitlements.yaml +``` + +### 4. Configure access for writing to git from the UI + + + +GitHub requires no additional configuration for OAuth git access + + + +Create a GitLab OAuth Application that will request `api` permissions to create pull requests on the user's behalf. +Follow the [GitLab docs](https://docs.gitlab.com/ee/integration/oauth_provider.html). + +The application should have at least these scopes: + +- `api` +- `openid` +- `email` +- `profile` + +Add callback URLs to the application for each address the UI will be exposed on, e.g.: + +- `https://localhost:8000/oauth/gitlab` For port-forwarding and testing +- `https://git.example.com/oauth/gitlab` For production use + +Save your application and take note of the **Client ID** and **Client Secret** and save +them into the `git-provider-credentials` secret along with: + +- `GIT_HOST_TYPES` to tell WGE that the host is gitlab +- `GITLAB_HOSTNAME` where the OAuth app is hosted + +**Replace values** in this snippet and run: + +```bash +kubectl create secret generic git-provider-credentials --namespace=flux-system \ + --from-literal="GITLAB_CLIENT_ID=13457" \ + --from-literal="GITLAB_CLIENT_SECRET=24680" \ + --from-literal="GITLAB_HOSTNAME=git.example.com" \ + --from-literal="GIT_HOST_TYPES=git.example.com=gitlab" +``` + + + + + +### 5. Configure and commit + +We deploy WGE via a Helm chart. We'll save and adapt the below template, before committing it to git to a flux-reconciled path. 
+ +Clone the newly created repo locally as we're gonna add some things! + +``` +git clone git@:/fleet-infra +cd fleet-infra +``` + +Download the helm-release to `clusters/management/weave-gitops-enterprise.yaml` and tweak: + +import ExampleWGE from "../assets/example-enterprise-helm.yaml"; +import ExampleWGEContent from "!!raw-loader!../assets/example-enterprise-helm.yaml"; + + + +#### `values.config.capi.repositoryURL` +Ensure this has been set to your repository URL. + +#### `values.config.capi.repositoryPath` +By default, WGE will create new clusters in the `clusters/management/clusters` path. +This can be configured with `values.config.capi.repositoryPath`. +For example you might what to change it to `clusters/my-cluster/cluster` if you configured flux to reconcile `./clusters/my-cluster` instead. + +#### `values.config.capi.repositoryClustersPath` +The other important path to configure is where applications and workloads that will be run on the new cluster will be stored. +By default this is `./clusters`. When a new cluster is specified any profiles that have been selected will be written to `./clusters/{.namespace}/{.clusterName}/profiles.yaml`. +When the new cluster is bootstrapped, flux will be sync the `./clusters/{.namespace}/{.clusterName}` path. + +#### (Optional) Install policy agent + +[Policy agent](../policy/intro.mdx) comes packaged with the WGE chart. To install it you need to set the following values: + +- `values.policy-agent.enabled`: set to true to install the agent with WGE +- `values.policy-agent.config.accountId`: organization name, used as identifier +- `values.policy-agent.config.clusterId`: unique identifier for the cluster + +Commit and push all the files + +```bash +git add clusters/management/weave-gitops-enterprise.yaml +git commit -m "Deploy Weave GitOps Enterprise" +git push +``` + +Flux will reconcile the helm-release and WGE will be deployed into the cluster. 
You can check the `flux-system` namespace to verify all pods are running. + +### 6. Configure password + +In order to login to the WGE UI, you need to generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. + +There are several different ways to generate a bcrypt hash, this guide uses `gitops get bcrypt-hash` from our CLI, which can be installed by following +the instructions [here](#gitops-cli). + +```bash +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q +``` + +Use the hashed output to create a Kubernetes username/password secret. + +```bash +kubectl create secret generic cluster-user-auth \ + --namespace flux-system \ + --from-literal=username=wego-admin \ + --from-literal=password='$2a$.......' +``` + +### Install CLI +Install the Weave GitOps Enterprise CLI tool. +You can use brew or curl + +```console +brew install weaveworks/tap/gitops-ee +``` + +```bash +curl --silent --location "https://artifacts.wge.dev.weave.works/releases/bin/0.11.0/gitops-$(uname | tr '[:upper:]' '[:lower:]')-$(uname -m).tar.gz" | tar xz -C /tmp +sudo mv /tmp/gitops /usr/local/bin +gitops version +``` + +## Next steps + +In our following [Get Started document](../getting-started.mdx), we will walk you through logging into the GitOps Dashboard and deploying an application. + +Then you can head over to either: + +- [Cluster Management - Getting started](../cluster-management/getting-started.mdx) to create your first CAPI Cluster with `kind`/CAPD +- [Deploying CAPA with EKS](../guides/deploying-capa.mdx) to create your first CAPI Cluster with EKS/CAPA. + +### (Optional) Install the TF-Controller + +The [TF-Controller](https://weaveworks.github.io/tf-controller/) is a controller for Flux to reconcile Terraform resources in a GitOps way. 
+ +With Flux and the TF-Controller, Weave GitOps Enterprise makes it easy to add Terraform templates to clusters and continuously reconcile any changes made to the Terraform source manifest. + +Check out our guide on how to [use Terraform templates](../guides/using-terraform-templates.mdx), and why not try your hands at using it with the RDS example! + +Install the TF-Controller to a cluster using Helm: + +```console +# Add tf-controller helm repository +helm repo add tf-controller https://weaveworks.github.io/tf-controller/ + +# Install tf-controller +helm upgrade -i tf-controller tf-controller/tf-controller \ + --namespace flux-system +``` + +Consult the TF-Controller [Installation](https://weaveworks.github.io/tf-controller/getting_started/) documentation for more details on which parameters are configurable and how to install a specific version. diff --git a/website/versioned_docs/version-0.12.0/installation/weave-gitops.mdx b/website/versioned_docs/version-0.12.0/installation/weave-gitops.mdx new file mode 100644 index 0000000000..bd771639e4 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/installation/weave-gitops.mdx @@ -0,0 +1,175 @@ +--- +title: Weave GitOps +sidebar_position: 2 +hide_title: true +pagination_next: "getting-started" +--- + +## Installing Weave GitOps + +### Before you begin + +We will provide a complete walk-through of getting Flux installed and Weave GitOps configured. However, if you have: +- an existing cluster bootstrapped Flux version >= 0.32.0 🎉 +- followed our [installation](./index.mdx) doc to configure access to the Weave GitOps dashboard then install Weave GitOps 👏 + +Then you can skip ahead to [the Weave GitOps overview](../getting-started.mdx#part-1---weave-gitops-overview) 🏃 +but note ⚠️ you may need to alter commands where we are committing files to GitHub ⚠️. + +To follow along, you will need the following: +- A Kubernetes cluster - such as [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/). 
+- A [GitHub](https://github.com) account and [personal access token with repo permissions](https://help.github.com/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line). +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +#### Install Flux + +Weave GitOps is an extension to Flux and therefore requires that Flux 0.32 or later has already been installed on your Kubernetes cluster. Full documentation is available at: [https://fluxcd.io/docs/installation/](https://fluxcd.io/docs/installation/). + +This version of Weave GitOps is tested against the following Flux releases: +* 0.32 +* 0.33 +* 0.34 +* 0.35 +* 0.36 + +1. Install the flux CLI + + ``` + brew install fluxcd/tap/flux + ``` + + For other installation methods, see the relevant [Flux documentation](https://fluxcd.io/docs/installation/#install-the-flux-cli). + +1. Export your credentials + + ``` + export GITHUB_TOKEN= + export GITHUB_USER= + ``` + +1. Check your Kubernetes cluster + + ``` + flux check --pre + ``` + + The output is similar to: + ``` + ► checking prerequisites + ✔ kubernetes 1.22.2 >=1.20.6 + ✔ prerequisites checks passed + ``` + +1. Install Flux onto your cluster with the `flux bootstrap` command + + ``` + flux bootstrap github \ + --owner=$GITHUB_USER \ + --repository=fleet-infra \ + --branch=main \ + --path=./clusters/my-cluster \ + --personal + ``` + +Full installation documentation including how to work with other Git providers is available [here](https://fluxcd.io/docs/installation/). 
+ +The bootstrap command above does the following: + +- Create a git repository fleet-infra on your GitHub account +- Add Flux component manifests to the repository +- Deploy Flux Components to your Kubernetes Cluster +- Configure Flux components to track the path /clusters/my-cluster/ in the repository + +### Install Weave GitOps + +For this guide we will use the cluster user, for complete documentation including how to configure an OIDC provider see the documentation [here](../configuration/securing-access-to-the-dashboard.mdx). + +#### gitops CLI + +Weave GitOps includes a command-line interface to help users create and manage resources. + +:::note Installation options +The `gitops` CLI is currently supported on Mac (x86 and Arm), and Linux - including Windows Subsystem for Linux (WSL). + +Windows support is a [planned enhancement](https://github.com/weaveworks/weave-gitops/issues/663). +::: + +There are multiple ways to install the `gitops` CLI: + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + + + + +```bash +curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.12.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp +sudo mv /tmp/gitops /usr/local/bin +gitops version +``` + + + + +```console +brew tap weaveworks/tap +brew install weaveworks/tap/gitops +``` + + + + +### Deploying Weave GitOps + +1. Clone your git repository where Flux has been bootstrapped. + + ``` + git clone https://github.com/$GITHUB_USER/fleet-infra + cd fleet-infra + ``` + +1. Run the following command which will create a `HelmRepository` and `HelmRelease` to deploy Weave GitOps + + ``` + PASSWORD="" + gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + ``` + +:::warning +This command stores a hash of a password. 
While this is relatively safe for demo and testing purposes it is recommend that you look at more secure methods of storing secrets (such as [Flux's SOPS integration](https://fluxcd.io/docs/guides/mozilla-sops/)) for production systems. +::: + +1. Commit and push the `weave-gitops-dashboard.yaml` to the `fleet-infra` repository + + ``` + git add -A && git commit -m "Add Weave GitOps Dashboard" + git push + ``` + +1. Validate that Weave GitOps and Flux are installed + + ``` + kubectl get pods -n flux-system + ``` + + You should see something similar to: + + ``` + NAME READY STATUS RESTARTS AGE + helm-controller-5bfd65cd5f-gj5sz 1/1 Running 0 10m + kustomize-controller-6f44c8d499-s425n 1/1 Running 0 10m + notification-controller-844df5f694-2pfcs 1/1 Running 0 10m + source-controller-6b6c7bc4bb-ng96p 1/1 Running 0 10m + ww-gitops-weave-gitops-86b645c9c6-k9ftg 1/1 Running 0 5m + ``` + +:::info +There's many other things you can configure in the weave gitops helm chart. For a reference, see [our value file reference](../references/helm-reference.md). +::: + + +## Next steps + +In our following [Get Started document](../getting-started.mdx), we will walk you through logging into the GitOps Dashboard and deploying an application. diff --git a/website/versioned_docs/version-0.12.0/intro.md b/website/versioned_docs/version-0.12.0/intro.md new file mode 100644 index 0000000000..5dfaea8a50 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/intro.md @@ -0,0 +1,64 @@ +--- +title: Introduction +sidebar_position: 0 +hide_title: true +--- +# Weave GitOps + +Weave GitOps is a powerful extension to [Flux](https://fluxcd.io), a leading GitOps engine and CNCF project. Weave GitOps provides insights into your application deployments, and makes continuous delivery with GitOps easier to adopt and scale across your teams. + +Its web UI surfaces key information to help application operators easily discover and resolve issues. 
The intuitive interface provides a guided experience to build understanding and simplify getting started for new users; they can easily discover the relationship between Flux objects and navigate to deeper levels of information as required. + +Weave GitOps is an open source project sponsored by [Weaveworks](https://weave.works) - the GitOps company, and original creators of [Flux](https://fluxcd.io). + +## Why adopt GitOps? + +> "GitOps is the best thing since configuration as code. Git changed how we collaborate, but declarative configuration is the key to dealing with infrastructure at scale, and sets the stage for the next generation of management tools" + +- Kelsey Hightower, Staff Developer Advocate, Google.

+ +Adopting GitOps can bring a number of key benefits: + +- Faster and more frequent deployments +- Easy recovery from failures +- Improved security and auditability + +To learn more about GitOps, check out these resources: + +- [GitOps for absolute beginners](https://go.weave.works/WebContent-EB-GitOps-for-Beginners.html) - eBook from Weaveworks +- [Guide to GitOps](https://www.weave.works/technologies/gitops/) - from Weaveworks +- [OpenGitOps](https://opengitops.dev/) - CNCF Sandbox project aiming to define a vendor-neutral, principle-led meaning of GitOps. +- [gitops.tech](https://www.gitops.tech/) - supported by Innoq + +## Getting Started + +To start your own journey with Weave GitOps, please see [Installation](./installation/index.mdx) and [Getting Started](./getting-started.mdx). + +Here is a quick demo of what you can look forward to: + +import ReactPlayer from "react-player/lazy"; + +
+ +
+ +## Features + +- **Applications view** - allows you to quickly understand the state of your deployments across a cluster at a glance. It shows summary information from `kustomization` and `helmrelease` objects. +- **Sources view** - shows the status of resources which are synchronizing content from where you have declared the desired state of your system, for example Git repositories. This shows summary information from `gitrepository`, `helmrepository` and `bucket` objects. +- **Flux Runtime view** - provides status on the GitOps engine continuously reconciling your desired and live state. It shows your installed GitOps Toolkit Controllers and their version. +- Drill down into more detailed information on any given Flux resource. +- Uncover relationships between resources and quickly navigate between them. +- Understand how workloads are reconciled through a directional graph. +- View Kubernetes events relating to a given object to understand issues and changes. +- Secure access to the dashboard through the ability to integrate with an OIDC provider (such as Dex) or through a configurable cluster user. +- Fully integrates with [Flux](https://fluxcd.io/docs/) as the GitOps engine to provide: + - Continuous Delivery through GitOps for apps and infrastructure + - Support for GitHub, GitLab, Bitbucket, and even use s3-compatible buckets as a source; all major container registries; and all CI workflow providers. + - A secure, pull-based mechanism, operating with least amount of privileges, and adhering to Kubernetes security policies. + - Compatible with any conformant [Kubernetes version](https://fluxcd.io/docs/installation/#prerequisites) and common ecosystem technologies such as Helm, Kustomize, RBAC, Prometheus, OPA, Kyverno, etc. 
+ - Multitenancy, multiple git repositories, multiple clusters + - Alerts and notifications diff --git a/website/versioned_docs/version-0.12.0/policy/_category_.json b/website/versioned_docs/version-0.12.0/policy/_category_.json new file mode 100644 index 0000000000..f388934091 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Policy", + "position": 6 +} diff --git a/website/versioned_docs/version-0.12.0/policy/commit-time-checks.mdx b/website/versioned_docs/version-0.12.0/policy/commit-time-checks.mdx new file mode 100644 index 0000000000..b499b0944a --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/commit-time-checks.mdx @@ -0,0 +1,204 @@ +--- +title: Commit/Build Time Checks +hide_title: true +sidebar_position: 8 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import TierLabel from "../_components/TierLabel"; + +# Commit/Build time checks + +## Overview +Weave GitOps Enterprise enables developers and operators to check policy violations early in their software development life cycle, specifically at commit and build time. Developers and operators can have Weave Policy Validator integrated in their CI tools to validate whether their code changes are violating any policies or not. + +Weave GitOps Enterprise offers a policy engine image that can be used to perform commit/build time checks. The image can be found on Docker Hub under the name: `weaveworks/weave-iac-validator:v1.1`. + +--- +## Usage +```bash +USAGE: 
+ +VERSION: + 0.0.1 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --path value path to resources kustomization directory + --helm-values-file value path to resources helm values file + --policies-path value path to policies kustomization directory + --policies-helm-values-file value path to policies helm values file + --git-repo-provider value git repository provider [$WEAVE_REPO_PROVIDER] + --git-repo-url value git repository url [$WEAVE_REPO_URL] + --git-repo-branch value git repository branch [$WEAVE_REPO_BRANCH] + --git-repo-sha value git repository commit sha [$WEAVE_REPO_SHA] + --git-repo-token value git repository token [$WEAVE_REPO_TOKEN] + --sast value save result as gitlab sast format + --sarif value save result as sarif format + --json value save result as json format + --generate-git-report generate git report if supported (default: false) [$WEAVE_GENERATE_GIT_PROVIDER_REPORT] + --remediate auto remediate resources if possible (default: false) + --no-exit-error exit with no error (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` +--- +## Setup policies +Policies can be helm chart, kustomize directory or just plain kubernetes yaml files. + +Example of policies kustomize directory +```bash +└── policies + ├── kustomization.yaml + ├── minimum-replica-count.yaml + ├── privileged-mode.yaml + └── privilege-escalation.yaml +``` + +```yaml +# kustomization.yaml +kind: Kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +resources: +- minimum-replica-count.yaml +- privilege-escalation.yaml +- privileged-mode.yaml +``` + +--- +## Auto-Remediation +Weave validator supports auto-remediation functionality which creates a pull request with suggested fixes to remediate the reported violations. + +Supported in: +- [ ] Helm +- [x] Kustomize +- [x] Plain kubernetes files + +To enable it you need to provide ```--remediate``` flag and ```--git-repo-token```. 
+ +> The token must have the permission to create pull request + +--- +## UseCase: Github +See how to setup the [Github Action](https://github.com/weaveworks/weave-action) + +--- +## UseCase: Gitlab + +```yaml +weave: + image: + name: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --path --policies-path +``` + +#### Enable Auto Remediation + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $GITLAB_TOKEN --remediate +``` +--- +#### Enable Static Application Security Testing + +```yaml +stages: + - weave + - sast + +weave: + stage: weave + image: + name: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --policies-path --sast sast.json + artifacts: + when: on_failure + paths: + - sast.json + +upload_sast: + stage: sast + when: always + script: + - echo "creating sast report" + artifacts: + reports: + sast: sast.json +``` +--- +## UseCase: Bitbucket + +```yaml +pipelines: + default: + - step: + name: 'Weaveworks' + image: weaveworks/weave-iac-validator:v1.1 + script: + - weave-validator --path --policies-path +``` +#### Enable Auto Remediation + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $TOKEN --remediate +``` + +#### Create Pipeline Report + +```yaml + script: + - weave-validator --path --policies-path --git-repo-token $TOKEN -generate-git-report +``` + +--- +## UseCase: CircleCI + +```yaml +jobs: + weave: + docker: + - image: weaveworks/weave-iac-validator:v1.1 + steps: + - checkout + - run: + command: weave-validator --path --policies-path +``` + +#### Enable Auto Remediation + +```yaml + - run: + command: weave-validator --path --policies-path --git-repo-token ${GITHUB_TOKEN} --remediate +``` + +--- +## UseCase: Azure DevOps + +```yaml +trigger: +- + +pool: + vmImage: ubuntu-latest + +container: + image: weaveworks/weave-iac-validator:v1.1-azure + +steps: +- script: weave-validator --path --policies-path --git-repo-token $(TOKEN) +``` + +#### Enable Auto 
Remediation + +```yaml +steps: +- script: weave-validator --path --policies-path --git-repo-token $(TOKEN) --remediate +``` diff --git a/website/versioned_docs/version-0.12.0/policy/configuration.mdx b/website/versioned_docs/version-0.12.0/policy/configuration.mdx new file mode 100644 index 0000000000..8d410fbe81 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/configuration.mdx @@ -0,0 +1,107 @@ +--- +title: Weave Policy Agent Configuration +hide_title: true +sidebar_position: 3 +--- + + +# Configuration + +The config file is the single entry point for configuring the agent. + +The agent needs the following parameters to be provided in the configuration yaml file: + +- `kubeConfigFile`: path to the kubernetes config file to access the cluster +- `accountId`: unique identifier that signifies the owner of that agent +- `clusterId`: unique identifier for the cluster that the agent will run against + +There are additional parameters could be provided: + +- `logLevel`: app log level (default: "info") +- `probesListen`: address for the probes server to run on (default: ":9000") +- `metricsAddress`: address the metric endpoint binds to (default: ":8080") +- `audit`: defines cluster periodical audit configuration including the supported sinks (disabled by default) +- `admission`: defines admission control configuration including the supported sinks and webhooks (disabled by default) +- `tfAdmission`: defines terraform admission control configuration including the supported sinks (disabled by default) + + +**Example** + +```yaml +accountId: "account-id" +clusterId: "cluster-id" +kubeConfigFile: "/.kube/config" +logLevel: "Info" +admission: + enabled: true + sinks: + filesystemSink: + fileName: admission.txt +audit: + enabled: true + writeCompliance: true + sinks: + filesystemSink: + fileName: audit.txt +``` + +## Validation Sinks Configuration + +### Kubernetes Events + +This sink is used to export validation results as kubernetes native events. 
Kubernetes event has a retention period and it set by default to 1 hour, you can configure the kubernetes [api-server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) to update the period. + +**Configuration** + +```yaml +sinks: + k8sEventsSink: + enabled: true +``` + +### Flux Notification Controller + +This sink sends the validation results to [Flux Notification Controller](https://github.com/fluxcd/notification-controller). + +**Configuration** + +```yaml +sinks: + fluxNotificationSink: + address: <> +``` + +### File System + +File system sink writes the validation results to a text file. The file will be located at `/logs/` + +**Configuration** + +```yaml +sinks: + fileSystemSink: + fileName: audit.txt +``` + +### ElasticSearch + +This sink stores the validation results in ElasticSearch. + +**Configuration** + +```yaml +sinks: + elasticSink: + address: http://localhost:9200 # ElasticSearch server address + username: # User credentials to access ElasticSearch service + password: # User credentials to access ElasticSearch service + indexName: # index name the results would be written in + insertionMode: # It could be a choice of both insert or upsert, it defines the way the document is written. +``` + +#### Insertion modes + +- `insert`: would give an insight of all the historical data, doesn't update or delete any old records. so the index would contain a log for all validation objects. + +- `upsert`: Would update the old result of validating an entity against a policy happens in the same day, so the index would only contain the latest validation results for a policy and entity combination per day. 
+ diff --git a/website/versioned_docs/version-0.12.0/policy/getting-started.mdx b/website/versioned_docs/version-0.12.0/policy/getting-started.mdx new file mode 100644 index 0000000000..ec4c2d6c43 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/getting-started.mdx @@ -0,0 +1,84 @@ +--- +title: Getting Started +sidebar_position: 1 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; +import CodeBlock from "@theme/CodeBlock"; +import BrowserOnly from "@docusaurus/BrowserOnly"; + +# Getting started + +This section introduces you to the Policy Profile and details the steps required to install it in Weave GitOps. + +## Pre-requisites + +### Weave GitOps +You need to have a running instance of Weave GitOps with at least one CAPI provider installed to provision Kubernetes clusters. See [Weave GitOps Installation](https://docs.gitops.weave.works/docs/installation/) page for more details about installing Weave GitOps. + +### Policy Library +For the policy agent to work, it will need a source for the policies that it will enforce in the cluster. You should have a policy library repo set up which includes your policies resources as CRDs. You can also add a `kustomization.yaml` file selecting the policies you want to install on that specific cluster that will be provisioned by Weave Gitops: + +``` +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- policies/ +- policies/ +- policies/ +``` + +:::info +Enterprise customers should have access to fork policy library repo into their local repositories. +::: + +## Install agent on management cluster + +The agent comes packaged with the WGE chart. 
To install it you need to set the following values: + +- `values.policy-agent.enabled`: set to true to install the agent with WGE +- `values.policy-agent.accountId`: organization name, used as identifier +- `values.policy-agent.clusterId`: unique identifier for the cluster + +## Install Policy Profile +To install the policy profile on a cluster, you should select the `weave-policy-agent` from the profiles dropdown in the `Create Cluster` page. + +![Policy Profile](./img/weave-policy-profile.png) + +You should then configure the `values.yaml`. You can find more about the policy profile configurations [here](../weave-policy-profile/). + +Add or link to profile config + +``` +policySource: + url: URL of the repo where your policies exist + tag: tag name on the policies repo + path: Path to the policies dir - or a kustomization.yaml that selects some policies - in the repo + secretRef (if the repo is private): Name of the K8s secret with private repo credentials (leave empty if the repo is public) +``` + +## Policies in UI +After the leaf cluster is provisioned and the profile is installed, you should now see the policies listed in the Policies tab in Weave GitOps UI. + +![Policies](./img/weave-policies.png) + +Now you have a provisioned cluster with these policies enforced by the policy agent. + +> By default, the policy profile is set up to enforce policies at deployment time using admission controller, which results in blocking any deployment that violates the enforced policies. + +## Prevent Violating Changes +Now let's try to deploy a Kubernetes deployment that violates one of the enforced policies. Let's deploy a deployment that has `spec.securityContext.allowPrivilegeEscalation` as `true`. This violates the `Allow Privilege Escalation` policy. + +Once you apply it, the policy agent will deny this request and show a violation message. 
+ +## Violations Logs in UI +You can view all the violation log in Weave GitOps UI to view all connected clusters policy violations, and where you can dive into the details of each violation. + +Violations Log + +![Violations Logs](./img/violations-logs.png) + +Violations Log Details + +![Violation Log Details](./img/violations-log-detail.png) diff --git a/website/versioned_docs/version-0.12.0/policy/img/violations-log-detail.png b/website/versioned_docs/version-0.12.0/policy/img/violations-log-detail.png new file mode 100644 index 0000000000..a180387bf7 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/policy/img/violations-log-detail.png differ diff --git a/website/versioned_docs/version-0.12.0/policy/img/violations-logs.png b/website/versioned_docs/version-0.12.0/policy/img/violations-logs.png new file mode 100644 index 0000000000..58773740d9 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/policy/img/violations-logs.png differ diff --git a/website/versioned_docs/version-0.12.0/policy/img/weave-policies.png b/website/versioned_docs/version-0.12.0/policy/img/weave-policies.png new file mode 100644 index 0000000000..bff055674d Binary files /dev/null and b/website/versioned_docs/version-0.12.0/policy/img/weave-policies.png differ diff --git a/website/versioned_docs/version-0.12.0/policy/img/weave-policy-profile.png b/website/versioned_docs/version-0.12.0/policy/img/weave-policy-profile.png new file mode 100644 index 0000000000..287904a316 Binary files /dev/null and b/website/versioned_docs/version-0.12.0/policy/img/weave-policy-profile.png differ diff --git a/website/versioned_docs/version-0.12.0/policy/intro.mdx b/website/versioned_docs/version-0.12.0/policy/intro.mdx new file mode 100644 index 0000000000..79465dac79 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/intro.mdx @@ -0,0 +1,23 @@ +--- +title: Introduction +sidebar_position: 0 +hide_title: true +--- + +import TierLabel from "../_components/TierLabel"; + 
+

+ {frontMatter.title} +

+ +## Policy + +The Weave Policy Engine helps users run continuous security and compliance checks across their software delivery pipeline. The engine utilizes policy-as-code to guarantee security, resilience and coding standards across applications and infrastructure. The engine comes with 100+ policies covering SOC2, GDPR, PCI-DSS, HIPAA, Mitre Attack and more. + +The policy engine provides the following functionality: +

Admission Controller

+An out-of-the-box admission controller that monitors any changes happening to the clusters' deployments and resources, and prevents violating changes at deployment time from being deployed to clusters. +

Audit

+Daily scans of your clusters' deployments and resources, reporting back any policy violations. The audit results can be published to different data analytics tools to provide compliance posture analysis for your clusters' runtime. +

Commit/Build Time Checks

+Early feedback on policy violations at the commit or build time, by reporting policy violations right inside git or other CI tools. That helps developers and operators detect policy violations and fix them before they deploy their changes to the clusters. diff --git a/website/versioned_docs/version-0.12.0/policy/policy-configuration.mdx b/website/versioned_docs/version-0.12.0/policy/policy-configuration.mdx new file mode 100644 index 0000000000..e3282ef27c --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/policy-configuration.mdx @@ -0,0 +1,278 @@ +--- +title: Weave Policy Configuration +hide_title: true +sidebar_position: 6 +--- + +# Policy Configuration + +## Goal + +Users sometimes need to enforce the same policy(s) with different configurations (parameters) for different targets (applications, resources, or namespaces). + +## Schema + +A new `PolicyConfig` CRD allows using policies with multiple configurations by configuring policy parameters based on a certain match on applications or resources with Schema and match with one of the following + +- Match by namespaces + + ```yaml + apiVersion: pac.weave.works/v2beta2 + kind: PolicyConfig # policy config resource kind + metadata: + name: my-config # policy config name + spec: + match: # matches (targets of the policy config) + namespaces: # add one or more name spaces + - dev + - prod + config: # config for policies [one or more] + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 + ``` + +- Match by apps + + ```yaml + apiVersion: pac.weave.works/v2beta2 + kind: PolicyConfig # policy config resource kind + metadata: + name: my-config # policy config name + spec: + match: # matches (targets of the policy config) + apps: # add one or more apps [HelmRelease, Kustomization] + - kind: HelmRelease + name: my-app # app name + namespace: flux-system # app namespace [if empty will match in any namespace] + config: # config for policies [one or more] + 
weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 + ``` + +- Match by resources + + ```yaml + apiVersion: pac.weave.works/v2beta2 + kind: PolicyConfig # policy config resource kind + metadata: + name: my-config # policy config name + spec: + match: # matches (targets of the policy config) + resources: # add one or more resources [Deployment, ReplicaSet, ..] + - kind: Deployment + name: my-deployment # resource name + namespace: default # resource namespace [if empty will match in any namespace] + config: # config for policies [one or more] + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 + ``` + +## Priority of enforcing multiple configs with overlapping targets [from low to high] + +- Policy configs which targets the namespace. +- Policy config which targets an application in all namespaces. +- Policy config which targets an application in a certain namespace. +- Policy config which targets a kubernetes resource in all namespaces. +- Policy config which targets a kubernetes resource in a specific namespace. + +**Note**: +- All configs are applied from low priority to high priority as well as common parameters between configs. +- Each config only affects the parameters defined in it. 
+ +### Example + +- We have Kustomization application `app-a` and deployment `deployment-1` part of this application + +```yaml +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-1 +spec: + match: + namespaces: + - flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 2 + owner: owner-1 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-2 +spec: + match: + apps: + - kind: Kustomization + name: app-a + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-3 +spec: + match: + apps: + - kind: Kustomization + name: app-a + namespace: flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 4 +--- +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-4 +spec: + match: + resources: + - kind: Deployment + name: deployment-1 + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 5 + owner: owner-4 +--- + +apiVersion: pac.weave.works/v2beta2 +kind: PolicyConfig +metadata: + name: my-config-5 +spec: + match: + resources: + - kind: Deployment + name: deployment-1 + namespace: flux-system + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 6 +``` + +**In the previous example when you apply the 5 configurations** + +- `app-a` will be affected by `my-config-5`. It will be applied on the policies defined in it, which will affect deployment `deployment-1` in namespace `flux-system` as it matches the kind, name and namespace. 
+ +**Note**: +Deploying `deployment-1` in another namespace other than `flux-system` won't be affected by this configuration + + Final config values will be as the following: + + ```yaml + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 6 # from my-config-5 + owner: owner-4 # from my-config-4 + ``` + - Deployment `deployment-1` in namespace `flux-system` replica_count must be `>= 6` + - Also it will be affected by `my-config-4` for `owner` configuration parameter `owner: owner-4` + + +**In the previous example when you apply `my-config-1`, `my-config-2`, `my-config-3` and `my-config-4`** + +- `my-config-4` will be applied on the policies defined in it. which will affect deployment `deployment-1` in all namespaces as it matches the kind and name only. + + Final config values will be as the following: + + ```yaml + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 5 # from my-config-4 + owner: owner-4 # from my-config-4 + ``` + + - Deployment `deployment-1` in all namespaces replica_count must be `>= 5` + - Also it will be affected by `my-config-4` for `owner` configuration parameter `owner: owner-4` + +**In the previous example when you apply `my-config-1`, `my-config-2` and `my-config-3`** + +- `my-config-3` will be applied on the policies defined in it. which will affect application `app-a` and all the resources in it in namespace `flux-system` as it matches the kind, name and namespace. 
+ +**Note**: +Deploying `app-a` in another namespace other than `flux-system` won't be affected by this configuration + + Final config values will be as the following: + + ```yaml + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 4 # from my-config-3 + owner: owner-1 # from my-config-1 + ``` + + - Application `app-a` and all the resources in it in namespaces `flux-system` replica_count must be `>= 4` + - Also it will be affected by `my-config-1` for `owner` configuration parameter `owner: owner-1` + +**In the previous example when you apply `my-config-1` and `my-config-2`** + +- `my-config-2` will be applied on the policies defined in it. which will affect application `app-a` and all the resources in it in all namespaces as it matches the kind and name only. + + Final config values will be as the following: + + ```yaml + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 3 # from my-config-2 + owner: owner-1 # from my-config-1 + ``` + + - Application `app-a` and all the resources in it in all namespaces replica_count must be `>= 3` + - Also it will be affected by `my-config-1` for `owner` configuration parameter `owner: owner-1` + +**In the previous example when you apply `my-config-1`** + +- `my-config-1` will be applied on the policies defined in it. which will affect the namespace `flux-system` with all applications and resources in it as it matches by namespace only. 
+ + Final config values will be as the following: + + ```yaml + config: + weave.policies.containers-minimum-replica-count: + parameters: + replica_count: 2 # from my-config-1 + owner: owner-1 # from my-config-1 + ``` + + - Any application or resource in namespace `flux-system` replica_count must be `>= 2` + - Also it will be affected by `my-config-1` for `owner` configuration parameter `owner: owner-1` + + +**Note** + +- You can use one or more policies as the following example + + ```yaml + apiVersion: pac.weave.works/v2beta2 + kind: PolicyConfig + metadata: + name: my-app-config + spec: + match: + resources: + name: my-deployment + kind: Deployment + config: + weave.policies.policy-1: + params: + replica_count: 3 + weave.policies.policy-2: + params: + run_as_root: true + ``` diff --git a/website/versioned_docs/version-0.12.0/policy/policy-set.mdx b/website/versioned_docs/version-0.12.0/policy/policy-set.mdx new file mode 100644 index 0000000000..afaf489b95 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/policy-set.mdx @@ -0,0 +1,109 @@ +--- +title: Weave Policy Set +hide_title: true +sidebar_position: 5 +--- + + +# PolicySet + +This is an optional resource. It is used to select group of policies to work in specific modes. + +In each mode, The agent will list all the PolicySets of this mode and check which policies match any of those policysets, Then validate the resources against them. + +If there is no policy set found all policies will work on all modes. 
+ +> Note: [Tenant Policies](./policy.mdx#tenant-policy) is always active in the [Admission](#admission) mode, event if it is not selected in the `admission` policysets + +**Example** +```yaml +apiVersion: pac.weave.works/v2beta2 +kind: PolicySet +metadata: + name: my-policy-set +spec: + mode: admission + filters: + ids: + - weave.policies.containers-minimum-replica-count + categories: + - security + severities: + - high + - medium + standards: + - pci-dss + tags: + - tag-1 +``` + +## Modes + +### Audit + +This mode performs the audit functionality. It triggers per the specified interval (by default every 24 hour) and then lists all the resources in the cluster which the agent has access to read and validates those resources against the audit policies. + +> Works with policies of provider `kubernetes` + + +### Admission + +This contains the admission module that enforces policies. It uses the `controller-runtime` Kubernetes package to register a callback that will be called when the agent receives an admission request. Once called, the agent will validate the received resource against the admission and tenant policies and k8s will use the result of this validation to either allow or reject the creation/update of said resource. + +> Works with policies of provider `kubernetes` + + +### Terraform Admission + +This is a webhook used to validate terraform plans. It is mainly used by the [TF-Controller](https://github.com/weaveworks/tf-controller) to enforce policies on terraform plans + +> Works with policies of provider `terraform` + + +## Grouping Policies + +Policies can be grouped by their ids, categories, severities, standards and tags + +The policy will be matched if any of the filters are matched. + + +## Migration from v2beta1 to v2beta2 + +### New fields +- New required field `spec.mode` is added. PolicySets should be updated to set the mode + +Previously the agent was configured with which policysets to use in each mode. 
Now we removed this argument from the agent's configuration and +add the mode to the Policyset itself. + +#### Example of the agent configuration in versions older than v2.0.0 + +```yaml +# config.yaml +admission: + enabled: true + policySet: admission-policy-set + sinks: + filesystemSink: + fileName: admission.txt +``` + +#### Example of current PolicySet with mode field + +```yaml +apiVersion: pac.weave.works/v2beta2 +kind: PolicySet +metadata: + name: admission-policy-set +spec: + mode: admission + filters: + ids: + - weave.policies.containers-minimum-replica-count +``` + + +### Updated fields +- Field `spec.name` became optional. + +### Deprecate fields +- Field `spec.id` is deprecated. diff --git a/website/versioned_docs/version-0.12.0/policy/policy.mdx b/website/versioned_docs/version-0.12.0/policy/policy.mdx new file mode 100644 index 0000000000..58df1168c1 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/policy.mdx @@ -0,0 +1,21 @@ +--- +title: Weave Policy +hide_title: true +sidebar_position: 4 +--- + +# Policy CRD + +This is the main resource and it is used to define policies which will be evaluated by the policy agent. + +It uses [OPA Rego Language](https://www.openpolicyagent.org/docs/latest/policy-language) to evaluate the entities. + +## Policy Library + +Here is the Weaveworks [Policy Library](https://github.com/weaveworks/policy-library) + +## Tenant Policy + +It is used in [Multi Tenancy](https://docs.gitops.weave.works/docs/enterprise/multi-tenancy/) feature in [Weave GitOps Enterprise](https://docs.gitops.weave.works/docs/enterprise/intro/) + +Tenant policies has a special tag `tenancy`. 
diff --git a/website/versioned_docs/version-0.12.0/policy/releases.mdx b/website/versioned_docs/version-0.12.0/policy/releases.mdx new file mode 100644 index 0000000000..85fa295ac0 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/releases.mdx @@ -0,0 +1,108 @@ +--- +title: Profile Releases +hide_title: true +sidebar_position: 7 +--- + +import TierLabel from "../_components/TierLabel"; + +# Profile Releases + +## v0.6.4 + +### Highlights +- **Agent** + - Add PolicyConfig CRD to make it possible to customize policy configuration per namespaces, applications or resources + - Add mode field to policy set and add policy modes to its status + - Add policy modes to labels to support filtering + - Support backward compatibility for policy version v2beta1 + +### Dependency Versions + +- Policy Agent v2.0.0 + +### Policy Library Compatibility + +Compatible with Policy Library versions: + +- v1.2.0 + +Needs this [migration steps](./policy-set.mdx#migration-from-v2beta1-to-v2beta2) to be compatible with the following versions: + +- v1.1.0 +- v1.0.0 +- v0.4.0 + + +## v0.6.3 + +### Highlights +- **Agent** + - Reference flux objects in violations events instead of the original resource object to be able to list specific flux application violations + +### Dependency Versions + +- policy-agent 1.2.1 + +### Policy Library Compatibility + +- v0.4.0 +- v1.0.0 +- v1.1.0 + +## v0.6.2 + +### Highlights +- **Agent** + - Add Terraform mode to allow validating terraform plans + - Support targeting kubernetes HPA resources + +### Dependency Versions + +- policy-agent 1.2.0 + +### Policy Library Compatibility + +- v0.4.0 +- v1.0.0 +- v1.1.0 + +While both v.0.4.0 and v1.0.0 are compatible with the agent. Only v1.1.0 includes the modification needed to make Controller Minimum Replica Count policy with with `horizontalpodautoscalers` + +## v0.6.1 + +### Highlights +- **Agent** + - Make the audit interval configurable through `config.audit.interval`. It defaults to 24 hours. 
+ - Add support for targeting certain flux resources (kustomizations, helmreleases and ocirepositories) in the admission mode. +- **Profile** + - Add the ability to use an existing GitSource instead of creating a new one. + + +### Dependency Versions + +- policy-agent 1.1.0 + +### Policy Library Compatibility + +- v0.4.0 +- v1.0.0 + +## v0.6.0 + +### Highlights +- **Agent** + - Configure the agent through a configuration file instead of arguments. + - Allow defining different validation sinks for audit and admission modes. + - Add the PolicySet CRD to the hem chart. +- **Profile** + - Disable the default policy source. + +### Dependency Versions + +- policy-agent 1.0.0 + +### Policy Library Compatibility + +- v0.4.0 +- v1.0.0 diff --git a/website/versioned_docs/version-0.12.0/policy/weave-policy-profile.mdx b/website/versioned_docs/version-0.12.0/policy/weave-policy-profile.mdx new file mode 100644 index 0000000000..bdd05f605e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/policy/weave-policy-profile.mdx @@ -0,0 +1,296 @@ +--- +title: Weave Policy Profile +hide_title: true +sidebar_position: 2 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +import TierLabel from "../_components/TierLabel"; + +# Weave policy profile + +# Weave Policy Profile + +## Overview + +Weave policy profile provides policies to automate the enforcement of best practice and conventions. It ensures the compliance of workloads through the use of a policy agent that provides an admission controller webhook that stops violating resources from deploying to a cluster and runs a daily audit that reports violating resources already deployed. + +--- +## Policy Sources + +Policies are provided in the profile as Custom Resources. The agent reads from the policies deployed on the cluster and runs them during each admission request or when auditing a resource. + +Policies are hosted in a policy library which is usually a git repository. 
They are fetched in the profile through the use of `kustomize.toolkit.fluxcd.io.Kustomization`, that deploys the policies to the cluster. + +By default all policies in the specified path would be deployed in order to specify which policies should be deployed in a library, a `kustomize.config.k8s.io.Kustomization` file should be defined in the repository. + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: # specifies the path to each required policy + - policies/ControllerContainerAllowingPrivilegeEscalation/policy.yaml + - policies/ControllerContainerRunningAsRoot/policy.yaml + - policies/ControllerReadOnlyFileSystem/policy.yaml +``` + +The profile then needs to be configured with the necessary config to be able to reach the repository that is acting as a policy library. + +```yaml +policySource: + enabled: true + url: URL of the repo where your policies exist + tag: tag name on the policies repo + path: Path to the policies dir - or a kustomization.yaml that selects some policies - in the repo + secretRef (if the repo is private): Name of the K8s secret with private repo credentials (leave empty if the repo is public) +``` + +There is the option of referencing an existing policy library source instead of creating a new one. +```yaml +policySource: + enabled: true + sourceRef: + kind: Kind of the existing source + name: Name of the policy library source + namespace: Namespace where the source exists +``` +--- +## Admission Controller Setup + +To enable admission control: + +```yaml +policy-agent: + config: + admission: + enabled: true +``` + +Enabling admission controller requires certificates for secure communication with the webhook client and the admission server. 
The best way to achieve this is by installing [cert manager](https://cert-manager.io/docs/installation/) and then configuring the profile as follows: + +```yaml +policy-agent: + useCertManager: true +``` + +There is the option of providing previously generated certificates although it is not recommended and it is up to the user to manage it: + +```yaml +policy-agent: + certificate: "---" # admission server certificate + key: "---" # admission server private key + caCertificate: "---" # CA bundle to validate the webhook server, used by the client +``` + +If the agent webhook could not be reached or the request failed to complete, the corresponding request would be refused. To change that behavior and accept the request in cases of failure, this needs to be set: + +```yaml +policy-agent: + failurePolicy: Ignore +``` + +--- +## Audit +Audit functionality provides a full scan on the cluster(s) and reports back policy violations. This usually is used for policy violations reporting, and Compliance posture analysis against known benchmarks like PCI DSS, CIS, etc. + +To enable audit functionality: + +```yaml +policy-agent: + config: + audit: + enabled: true + interval: 24 # configures the frequency of audit operations in hours (default is 24 hours) +``` + +Audit will be performed when the agent starts and then at an interval of your choice in hours (default is 24 hours). The results from that scan would be published by the registered sinks. + +--- +## Policy Sets + +Policy set is a custom resource that gives more control over which policies to be used in each scenario. There are cases in which certain policies are required to be observed but denying the requests of violating objects would be disruptive. Policy set allows defining additional filters for each scenario: `Audit` and `Admission` so it is possible to report violations on certain policies without the need of blocking the deployments if certain policies are not as critical as others.
+ +Policy set should also be hosted on the policy library. The following definition defines which specific policies should be used using policy names: + +```yaml +apiVersion: pac.weave.works/v2beta1 +kind: PolicySet +metadata: + name: admission-policy-set +spec: + id: admission-policy-set + name: admission-policy-set + filters: + ids: + - weave.policies.containers-running-with-privilege-escalation + - weave.policies.containers-read-only-root-filesystem +``` + +To make use of this policy set in the profile: + +```yaml +policy-agent: + config: + admission: + policySet: admission-policy-set # name of policy set to be used for admission + audit: + policySet: audit-policy-set # name of policy set to be used for audit +``` + +--- +## Policy Validation Sinks + +When validating a resource a validation object is generated that contains information about the status of that validation and metadata about the resource and policy involved. These objects should be exported to be visible for users as a critical part of the audit flow, but can also be useful as logs for the admission scenario. + +By default the agent only writes policy validation that are violating a certain policy when performing an audit, to write compliance results as well, the following needs to be specified in the profile: + +```yaml +policy-agent: + config: + audit: + writeCompliance: true +``` + +The agent profile supports multiple methods for different scenarios either Admission or Audit to expose this data and multiple can be used at the same time: + + + + + +The results would be dumped into a text file in `logs` directory in the agent container as a json string. It is important to note that this file would not be persistent and would be deleted upon pod restart, so generally this approach is not recommended for production environment. 
+ +To enable writing to a text file in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + fileSystemSink: + fileName: "file.json" +``` + +To enable writing to a text file in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + fileSystemSink: + fileName: "file.json" +``` + +It is possible to make the file persistent, this assumes that there is a [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) already configured on the cluster. + +```yaml +policy-agent: + persistence: + enabled: false # specifies whether to use persistence or not + claimStorage: 1Gi # claim size + storageClassName: standard # k8s StorageClass name +``` + + +The results would be written as Kubernetes events. This means that they are accessible through the kubernetes API and can be consumed by custom exporters. + +To enable writing Kubernetes events in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + k8sEventsSink: + enabled: true +``` + +To enable writing Kubernetes events in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + k8sEventsSink: + enabled: true +``` + + +This requires the cluster to be managed using flux. It makes use of flux notification controller to send events to multiple sources, depending on the controller configuration. The agent writes the events to the controller and it proceeds to publish it to the configured listeners. + +To enable writing to flux notification controller in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + fluxNotificationSink: + address: "" +``` + +To enable writing to flux notification controller in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + fluxNotificationSink: + address: "" +``` + + +The results of validating entities against policies would be written in Elasticsearch index. 
+ +To enable writing to elasticsearch in audit scenario: + +```yaml +policy-agent: + config: + audit: + sinks: + elasticSink: + address: "" + username: "" + password: "" + indexName: "" + insertionMode: "upsert" +``` + +To enable writing to Elasticsearch in admission scenario: + +```yaml +policy-agent: + config: + admission: + sinks: + elasticSink: + address: "" + username: "" + password: "" + indexName: "" + insertionMode: "insert" +``` + + + +--- +## Policy Validation + +Policy validation object contains all the necessary information to give the user a clear idea on what caused it. It is the result of validating an entity against a policy. + +```yaml +id: string # identifier for the violation +account_id: string # organization identifier +cluster_id: string # cluster identifier +policy: object # contains related policy data +entity: object # contains related resource data +status: string # Violation or Compliance +message: string # message that summarizes the policy validation +type: string # Admission or Audit +trigger: string # what triggered the validation, create request or initial audit, etc.
+created_at: string # time at which the validation occurred +``` diff --git a/website/versioned_docs/version-0.12.0/references/_category_.json b/website/versioned_docs/version-0.12.0/references/_category_.json new file mode 100644 index 0000000000..61fb9bb3be --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "References", + "position": 9 +} diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/_category_.json b/website/versioned_docs/version-0.12.0/references/cli-reference/_category_.json new file mode 100644 index 0000000000..8164f0a2ca --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Reference" +} diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops.md new file mode 100644 index 0000000000..3742895f5e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops.md @@ -0,0 +1,47 @@ +## gitops + +Weave GitOps + +### Synopsis + +Command line utility for managing Kubernetes applications via GitOps. + +### Examples + +``` + + # Get help for gitops create dashboard command + gitops create dashboard -h + gitops help create dashboard + + # Get the version of gitops along with commit, branch, and flux version + gitops version + + To learn more, you can find our documentation at https://docs.gitops.weave.works/ + +``` + +### Options + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + -h, --help help for gitops + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops beta](gitops_beta.md) - This component contains unstable or still-in-development functionality +* [gitops check](gitops_check.md) - Validates flux compatibility +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell +* [gitops create](gitops_create.md) - Creates a resource +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources +* [gitops remove](gitops_remove.md) - Remove various components of Weave GitOps +* [gitops set](gitops_set.md) - Sets one or many Weave GitOps CLI configs or resources +* [gitops version](gitops_version.md) - Display gitops version + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta.md new file mode 100644 index 0000000000..d3f5e7569a --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta.md @@ -0,0 +1,27 @@ +## gitops beta + +This component contains unstable or still-in-development functionality + +### Options + +``` + -h, --help help for beta +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. 
Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops beta run](gitops_beta_run.md) - Set up an interactive sync between your cluster and your local file system + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta_run.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta_run.md new file mode 100644 index 0000000000..d908ce531d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_beta_run.md @@ -0,0 +1,77 @@ +## gitops beta run + +Set up an interactive sync between your cluster and your local file system + +### Synopsis + +This will set up a sync between the cluster in your kubeconfig and the path that you specify on your local filesystem. If you do not have Flux installed on the cluster then this will add it to the cluster automatically. This is a requirement so we can sync the files successfully from your local system onto the cluster. Flux will take care of producing the objects for you. + +``` +gitops beta run [flags] +``` + +### Examples + +``` + +# Run the sync on the current working directory +gitops beta run . [flags] + +# Run the sync against the dev overlay path +gitops beta run ./deploy/overlays/dev + +# Run the sync on the dev directory and forward the port. +# Listen on port 8080 on localhost, forwarding to 5000 in a pod of the service app. 
+gitops beta run ./dev --port-forward port=8080:5000,resource=svc/app + +# Run the sync on the dev directory with a specified root dir. +gitops beta run ./clusters/default/dev --root-dir ./clusters/default + +# Run the sync on the podinfo demo. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./deploy/overlays/dev --timeout 3m --port-forward namespace=dev,resource=svc/backend,port=9898:9898 + +# Run the sync on the podinfo Helm chart. Please note that file Chart.yaml must exist in the directory. +git clone https://github.com/stefanprodan/podinfo +cd podinfo +gitops beta run ./chart/podinfo --timeout 3m --port-forward namespace=flux-system,resource=svc/run-dev-helm-podinfo,port=9898:9898 +``` + +### Options + +``` + --allow-k8s-context strings The name of the KubeConfig context to explicitly allow. + --components strings The Flux components to install. (default [source-controller,kustomize-controller,helm-controller,notification-controller]) + --components-extra strings Additional Flux components to install, allowed values are image-reflector-controller,image-automation-controller. + --context string The name of the kubeconfig context to use + --dashboard-hashed-password string GitOps Dashboard password in BCrypt hash format + --dashboard-port string GitOps Dashboard port (default "9001") + --flux-version string The version of Flux to install. (default "0.37.0") + -h, --help help for run + --no-bootstrap Disable bootstrapping at shutdown. + --no-session Disable session management. If not specified, the session will be enabled by default. + --port-forward string Forward the port from a cluster's resource to your local machine i.e. 'port=8080:8080,resource=svc/app'. + --root-dir string Specify the root directory to watch for changes. If not specified, the root of Git repository will be used. + --session-name string Specify the name of the session. If not specified, the name of the current branch and the last commit id will be used. 
(default "run-main-9229d8f5-dirty") + --session-namespace string Specify the namespace of the session. (default "default") + --skip-dashboard-install Skip installation of the Dashboard. This also disables the prompt asking whether the Dashboard should be installed. + --skip-resource-cleanup Skip resource cleanup. If not specified, the GitOps Run resources will be deleted by default. + --timeout duration The timeout for operations during GitOps Run. (default 5m0s) +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops beta](gitops_beta.md) - This component contains unstable or still-in-development functionality + diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_check.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_check.md new file mode 100644 index 0000000000..a2768cd696 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_check.md @@ -0,0 +1,39 @@ +## gitops check + +Validates flux compatibility + +``` +gitops check [flags] +``` + +### Examples + +``` + +# Validate flux and kubernetes compatibility +gitops check + +``` + +### Options + +``` + -h, --help help for check 
+``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion.md new file mode 100644 index 0000000000..c7e485eacf --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion.md @@ -0,0 +1,36 @@ +## gitops completion + +Generate the autocompletion script for the specified shell + +### Synopsis + +Generate the autocompletion script for gitops for the specified shell. +See each sub-command's help for details on how to use the generated script. + + +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops completion bash](gitops_completion_bash.md) - Generate the autocompletion script for bash +* [gitops completion fish](gitops_completion_fish.md) - Generate the autocompletion script for fish +* [gitops completion powershell](gitops_completion_powershell.md) - Generate the autocompletion script for powershell +* [gitops completion zsh](gitops_completion_zsh.md) - Generate the autocompletion script for zsh + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_bash.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_bash.md new file mode 100644 index 0000000000..1dc93cb59d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_bash.md @@ -0,0 +1,55 @@ +## gitops completion bash + +Generate the autocompletion script for bash + +### Synopsis + +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. 
+ +To load completions in your current shell session: + + source <(gitops completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + gitops completion bash > /etc/bash_completion.d/gitops + +#### macOS: + + gitops completion bash > $(brew --prefix)/etc/bash_completion.d/gitops + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion bash +``` + +### Options + +``` + -h, --help help for bash + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_fish.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_fish.md new file mode 100644 index 0000000000..8a36fb553b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_fish.md @@ -0,0 +1,46 @@ +## gitops completion fish + +Generate the autocompletion script for fish + +### 
Synopsis + +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + gitops completion fish | source + +To load completions for every new session, execute once: + + gitops completion fish > ~/.config/fish/completions/gitops.fish + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion fish [flags] +``` + +### Options + +``` + -h, --help help for fish + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_powershell.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_powershell.md new file mode 100644 index 0000000000..c29408c27f --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_powershell.md @@ -0,0 +1,43 @@ +## gitops completion powershell + +Generate the autocompletion script for powershell + +### 
Synopsis + +Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + gitops completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. + + +``` +gitops completion powershell [flags] +``` + +### Options + +``` + -h, --help help for powershell + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_zsh.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_zsh.md new file mode 100644 index 0000000000..59d3e29492 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_completion_zsh.md @@ -0,0 +1,57 @@ +## gitops completion zsh + +Generate the autocompletion script for zsh + +### Synopsis + +Generate the autocompletion script for the zsh shell. 
+ +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(gitops completion zsh); compdef _gitops gitops + +To load completions for every new session, execute once: + +#### Linux: + + gitops completion zsh > "${fpath[1]}/_gitops" + +#### macOS: + + gitops completion zsh > $(brew --prefix)/share/zsh/site-functions/_gitops + +You will need to start a new shell for this setup to take effect. + + +``` +gitops completion zsh [flags] +``` + +### Options + +``` + -h, --help help for zsh + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops completion](gitops_completion.md) - Generate the autocompletion script for the specified shell + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create.md new file mode 100644 index 0000000000..2f94cf440d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create.md @@ -0,0 +1,40 @@ +## gitops create + +Creates a resource + +### Examples + +``` + +# Create a HelmRepository and HelmRelease to deploy Weave GitOps +gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + +``` + +### Options + +``` + --export Export in YAML format to stdout. + -h, --help help for create + --timeout duration The timeout for operations during resource creation. (default 3m0s) +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops create dashboard](gitops_create_dashboard.md) - Create a HelmRepository and HelmRelease to deploy Weave GitOps + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create_dashboard.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create_dashboard.md new file mode 100644 index 0000000000..70ee678779 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_create_dashboard.md @@ -0,0 +1,47 @@ +## gitops create dashboard + +Create a HelmRepository and HelmRelease to deploy Weave GitOps + +### Synopsis + +Create a HelmRepository and HelmRelease to deploy Weave GitOps + +``` +gitops create dashboard [flags] +``` + +### Examples + +``` + +# Create a HelmRepository and HelmRelease to deploy Weave GitOps +gitops create dashboard ww-gitops \ + --password=$PASSWORD \ + --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml + +``` + +### Options + +``` + --context string The name of the kubeconfig context to use + -h, --help help for dashboard + --password string The password of the dashboard admin user. + --username string The username of the dashboard admin user. (default "admin") +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --export Export in YAML format to stdout. 
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + --timeout duration The timeout for operations during resource creation. (default 3m0s) +``` + +### SEE ALSO + +* [gitops create](gitops_create.md) - Creates a resource + diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get.md new file mode 100644 index 0000000000..78ffc77d80 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get.md @@ -0,0 +1,40 @@ +## gitops get + +Display one or many Weave GitOps resources + +### Examples + +``` + +# Get the CLI configuration for Weave GitOps +gitops get config + +# Generate a hashed secret +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops get bcrypt-hash](gitops_get_bcrypt-hash.md) - Generates a hashed secret +* [gitops get config](gitops_get_config.md) - Prints out the CLI configuration for Weave GitOps + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_bcrypt-hash.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_bcrypt-hash.md new file mode 100644 index 0000000000..ae3fdad238 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_bcrypt-hash.md @@ -0,0 +1,39 @@ +## gitops get bcrypt-hash + +Generates a hashed secret + +``` +gitops get bcrypt-hash [flags] +``` + +### Examples + +``` + +PASSWORD="" +echo -n $PASSWORD | gitops get bcrypt-hash + +``` + +### Options + +``` + -h, --help help for bcrypt-hash +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_config.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_config.md new file mode 100644 index 0000000000..d92cd490a2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_get_config.md @@ -0,0 +1,37 @@ +## gitops get config + +Prints out the CLI configuration for Weave GitOps + +``` +gitops get config [flags] +``` + +### Examples + +``` + +# Prints out the CLI configuration for Weave GitOps +gitops get config +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops get](gitops_get.md) - Display one or many Weave GitOps resources + diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove.md new file mode 100644 index 0000000000..7386b7fdde --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove.md @@ -0,0 +1,27 @@ +## gitops remove + +Remove various components of Weave GitOps + +### Options + +``` + -h, --help help for remove +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops remove run](gitops_remove_run.md) - Remove GitOps Run sessions + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove_run.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove_run.md new file mode 100644 index 0000000000..af620f6e3b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_remove_run.md @@ -0,0 +1,50 @@ +## gitops remove run + +Remove GitOps Run sessions + +### Synopsis + +Remove GitOps Run sessions + +``` +gitops remove run [flags] +``` + +### Examples + +``` + +# Remove the GitOps Run session "dev-1234" from the "flux-system" namespace +gitops remove run --namespace flux-system dev-1234 + +# Remove all GitOps Run sessions from the default namespace +gitops remove run --all-sessions + +# Remove all GitOps Run sessions from the dev namespace +gitops remove run -n dev --all-sessions + +``` + +### Options + +``` + --all-sessions Remove all GitOps Run sessions + --context string The name of the kubeconfig context to use + -h, --help help for run +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. 
Only required if out-of-cluster. + --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops remove](gitops_remove.md) - Remove various components of Weave GitOps + diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set.md new file mode 100644 index 0000000000..b2d2b68123 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set.md @@ -0,0 +1,35 @@ +## gitops set + +Sets one or many Weave GitOps CLI configs or resources + +### Examples + +``` + +# Enables analytics in the current user's CLI configuration for Weave GitOps +gitops set config analytics true +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps +* [gitops set config](gitops_set_config.md) - Set the CLI configuration for Weave GitOps + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set_config.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set_config.md new file mode 100644 index 0000000000..bdf59cf9f4 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_set_config.md @@ -0,0 +1,37 @@ +## gitops set config + +Set the CLI configuration for Weave GitOps + +``` +gitops set config [flags] +``` + +### Examples + +``` + +# Enables analytics in the current user's CLI configuration for Weave GitOps +gitops set config analytics true +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops set](gitops_set.md) - Sets one or many Weave GitOps CLI configs or resources + diff --git a/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_version.md b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_version.md new file mode 100644 index 0000000000..d7b0328cfb --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/cli-reference/gitops_version.md @@ -0,0 +1,30 @@ +## gitops version + +Display gitops version + +``` +gitops version [flags] +``` + +### Options + +``` + -h, --help help for version +``` + +### Options inherited from parent commands + +``` + -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --namespace string The namespace scope for this operation (default "flux-system") + -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable + -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable +``` + +### SEE ALSO + +* [gitops](gitops.md) - Weave GitOps + +###### Auto generated by spf13/cobra on 8-Dec-2022 diff --git a/website/versioned_docs/version-0.12.0/references/helm-reference.md b/website/versioned_docs/version-0.12.0/references/helm-reference.md new file mode 100644 index 0000000000..a90e14c75e --- /dev/null +++ b/website/versioned_docs/version-0.12.0/references/helm-reference.md @@ -0,0 +1,67 @@ +# Helm chart reference + + +This is a reference of all the configurable values in weave gitops's +helm chart. This is intended for customizing your installation after +you've gone through the [getting started](../getting-started.mdx) guide. + +This reference was generated for the chart version 4.0.9 which installs weave gitops v0.12.0. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| adminUser.create | bool | `false` | Whether the local admin user should be created. If you use this make sure you add it to `rbac.impersonationResourceNames`. | +| adminUser.createClusterRole | bool | `true` | Specifies whether the clusterRole & binding to the admin user should be created. Will be created only if `adminUser.create` is enabled. Without this, the adminUser will only be able to see resources in the target namespace. | +| adminUser.createSecret | bool | `true` | Whether we should create the secret for the local adminUser. Will be created only if `adminUser.create` is enabled. Without this, we'll still set up the roles and permissions, but the secret with username and password has to be provided separately. 
| +| adminUser.passwordHash | string | `nil` | Set the password for local admin user. Requires `adminUser.create` and `adminUser.createSecret` This needs to have been hashed using bcrypt. You can do this via our CLI with `gitops get bcrypt-hash`. | +| adminUser.username | string | `"gitops-test-user"` | Set username for local admin user, this should match the value in the secret `cluster-user-auth` which can be created with `adminUser.createSecret`. Requires `adminUser.create`. | +| affinity | object | `{}` | | +| annotations | object | `{}` | Annotations to add to the deployment | +| envVars[0].name | string | `"WEAVE_GITOPS_FEATURE_TENANCY"` | | +| envVars[0].value | string | `"true"` | | +| envVars[1].name | string | `"WEAVE_GITOPS_FEATURE_CLUSTER"` | | +| envVars[1].value | string | `"false"` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"ghcr.io/weaveworks/wego-app"` | | +| image.tag | string | `"v0.12.0"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `""` | | +| ingress.enabled | bool | `false` | | +| ingress.hosts | string | `nil` | | +| ingress.tls | list | `[]` | | +| logLevel | string | `"info"` | What log level to output. Valid levels are 'debug', 'info', 'warn' and 'error' | +| metrics.enabled | bool | `false` | Start the metrics exporter | +| metrics.service.annotations | object | `{"prometheus.io/path":"/metrics","prometheus.io/port":"{{ .Values.metrics.service.port }}","prometheus.io/scrape":"true"}` | Annotations to set on the service | +| metrics.service.port | int | `2112` | Port to start the metrics exporter on | +| nameOverride | string | `""` | | +| networkPolicy.create | bool | `true` | Specifies whether default network policies should be created. 
| +| nodeSelector | object | `{}` | | +| oidcSecret.create | bool | `false` | | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | | +| rbac.additionalRules | list | `[]` | If non-empty, these additional rules will be appended to the RBAC role and the cluster role. for example, additionalRules: - apiGroups: ["infra.contrib.fluxcd.io"] resources: ["terraforms"] verbs: [ "get", "list", "patch" ] | +| rbac.create | bool | `true` | Specifies whether the clusterRole & binding to the service account should be created | +| rbac.impersonationResourceNames | list | `[]` | If non-empty, this limits the resources that the service account can impersonate. This applies to both users and groups, e.g. `['user1@corporation.com', 'user2@corporation.com', 'operations']` | +| rbac.impersonationResources | list | `["users","groups"]` | Limit the type of principal that can be impersonated | +| rbac.viewSecretsResourceNames | list | `["cluster-user-auth","oidc-auth"]` | If non-empty, this limits the secrets that can be accessed by the service account to the specified ones, e.g. `['weave-gitops-enterprise-credentials']` | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.readOnlyRootFilesystem | bool | `true` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.runAsUser | int | `1000` | | +| securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| serverTLS.enable | bool | `false` | Enable TLS termination in gitops itself. If you enable this, you need to create a secret, and specify the secretName. Another option is to create an ingress. | +| serverTLS.secretName | string | `"my-secret-tls"` | Specify the tls secret name. This type of secrets have a key called `tls.crt` and `tls.key` containing their corresponding values in base64 format. 
See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for more details and examples | +| service.annotations | object | `{}` | | +| service.create | bool | `true` | | +| service.port | int | `9001` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| tolerations | list | `[]` | | diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/_category_.json b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/_category_.json new file mode 100644 index 0000000000..a0f1d3a400 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Using Terraform CRD", + "position": 2 +} \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/custom-backend.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/custom-backend.mdx new file mode 100644 index 0000000000..7402914428 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/custom-backend.mdx @@ -0,0 +1,46 @@ +--- +title: Configure Custom Backend +sidebar_position: 3 +hide_title: true +--- + +# Configure Custom Backend + +By default, `tf-controller` will use the [Kubernetes backend](https://www.terraform.io/language/settings/backends/kubernetes) to store the Terraform state file (tfstate) in cluster. + +The tfstate is stored in a secret named: `tfstate-${workspace}-${secretSuffix}`. The default `suffix` will be the name of the Terraform resource, however you may override this setting using `.spec.backendConfig.secretSuffix`. 
The default `workspace` name is "default", you can also override the workspace by setting `.spec.workspace` to another value. + +If you wish to use a custom backend, you can configure it by defining the `.spec.backendConfig.customConfiguration` with one of the backends such as **GCS** or **S3**, for example: + +```yaml hl_lines="9-21" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + backendConfig: + customConfiguration: | + backend "s3" { + bucket = "s3-terraform-state1" + key = "dev/terraform.tfstate" + region = "us-east-1" + endpoint = "http://localhost:4566" + skip_credentials_validation = true + skip_metadata_api_check = true + force_path_style = true + dynamodb_table = "terraformlock" + dynamodb_endpoint = "http://localhost:4566" + encrypt = true + } + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + runnerPodTemplate: + spec: + image: registry.io/tf-runner:xyz +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/customize-runner.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/customize-runner.mdx new file mode 100644 index 0000000000..fbe09111a0 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/customize-runner.mdx @@ -0,0 +1,63 @@ +--- +title: Customize Runner Pods +sidebar_position: 3 +hide_title: true +--- + +# Customize Runner Pod's Metadata + +In some situations, it is needed to add custom labels and annotations to the runner pod used to reconcile Terraform. +For example, for Azure AKS to grant pod active directory permissions using Azure Active Directory (AAD) Pod Identity, +a label like `aadpodidbinding: myIdentity` on the pod is required. 
+ 
+```yaml
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: helloworld
+  namespace: flux-system
+spec:
+  approvePlan: auto
+  interval: 1m
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: helloworld
+    namespace: flux-system
+  runnerPodTemplate:
+    metadata:
+      labels:
+        aadpodidbinding: myIdentity
+      annotations:
+        company.com/abc: xyz
+```
+
+## Customize Runner Pod Image
+
+By default, the Terraform controller uses the `RUNNER_POD_IMAGE` environment variable to identify the Runner Pod's image to use. You can customize the image at the global level by updating the value of the environment variable, or you can specify an image to use per Terraform object for its reconciliation.
+
+```yaml
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: helloworld
+  namespace: flux-system
+spec:
+  approvePlan: auto
+  interval: 1m
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: helloworld
+    namespace: flux-system
+  runnerPodTemplate:
+    spec:
+      image: registry.io/tf-runner:xyz
+```
+
+You can use [`runner.Dockerfile`](https://github.com/weaveworks/tf-controller/blob/main/runner.Dockerfile) as a basis for customizing the runner pod image.
+
+## Customize Runner Pod Specifications
+
+You can also customize various Runner Pod `spec` fields to control and configure how the Runner Pod runs.
+For example, you can configure Runner Pod `spec` affinity and tolerations if you need to run it on a specific set of nodes. Please see [RunnerPodSpec](https://weaveworks.github.io/tf-controller/References/terraform/#infra.contrib.fluxcd.io/v1alpha1.RunnerPodSpec) for a list of the configurable Runner Pod `spec` fields.
diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/depends-on.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/depends-on.mdx new file mode 100644 index 0000000000..0031130b03 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/depends-on.mdx @@ -0,0 +1,98 @@ +--- +title: Dependency Management +sidebar_position: 2 +hide_title: true +--- + +# Dependency Management + +TF-controller supports GitOps dependency management. The GitOps dependency management feature +is based on the Kustomization controller of Flux. + +This means that you can use TF-controller to provision resources that depend on other resources at the GitOps level. +For example, you can use TF-controller to provision an S3 bucket, and then use TF-controller +to provision another resource to configure ACL for that bucket. + +## Create a Terraform object + +Similar to the same feature in the Kustomization controller, the dependency management feature is enabled +by setting the `dependsOn` field in the `Terraform` object. The `dependsOn` field is a list of +`Terraform` objects. + +First, create a `Terraform` object to provision the S3 bucket, name it `aws-s3-bucket`. +The S3 bucket is provisioned by the Terraform module `aws_s3_bucket` in the OCI image `aws-package-v4.33.0`. +It is configured to use the `auto-apply` mode, and write outputs to the secret `aws-s3-bucket-outputs`. 
+ +```yaml hl_lines="20-24" +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: aws-s3-bucket + namespace: flux-system +spec: + path: aws_s3_bucket + values: + bucket: my-tf-controller-test-bucket + tags: + Environment: Dev + Name: My bucket + sourceRef: + kind: OCIRepository + name: aws-package-v4.33.0 + approvePlan: auto + interval: 2m + destroyResourcesOnDeletion: true + writeOutputsToSecret: + name: aws-s3-bucket-outputs + outputs: + - arn + - bucket + runnerPodTemplate: + spec: + envFrom: + - secretRef: + name: aws-credentials +``` + +Second, create a `Terraform` object to configure ACL for the S3 bucket, name it `aws-s3-bucket-acl`. +The ACL is provisioned by the Terraform module `aws_s3_bucket_acl`, also from the OCI image `aws-package-v4.33.0`. + +In the `dependsOn` field, specify the `Terraform` object that provisions the S3 bucket. +This means that the ACL will be configured only after the S3 bucket is provisioned, and has its outputs Secret written. +We can read the outputs of the S3 bucket from the Secret `aws-s3-bucket-outputs`, by specifying the `spec.readInputsFromSecrets` field. +The `spec.readInputsFromSecrets` field is a list of Secret objects. +Its `name` field is the name of the Secret, and its `as` field is the name of variable that can be used in the `spec.values` block. + +For example, the `spec.values.bucket` field in the `aws-s3-bucket-acl` Terraform object is set to `${{ .aws_s3_bucket.bucket }}`. + +Please note that we use `${{` and `}}` as the delimiters for the variable name, instead of the Helm default ones, `{{` and `}}`. 
+ +```yaml hl_lines="11 18 20-21" +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: aws-s3-bucket-acl + namespace: flux-system +spec: + path: aws_s3_bucket_acl + values: + acl: private + bucket: ${{ .aws_s3_bucket.bucket }} + sourceRef: + kind: OCIRepository + name: aws-package-v4.33.0 + approvePlan: auto + interval: 3m + dependsOn: + - name: aws-s3-bucket + readInputsFromSecrets: + - name: aws-s3-bucket-outputs + as: aws_s3_bucket + runnerPodTemplate: + spec: + envFrom: + - secretRef: + name: aws-credentials +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/drift-detection.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/drift-detection.mdx new file mode 100644 index 0000000000..c9cff5c46c --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/drift-detection.mdx @@ -0,0 +1,51 @@ +--- +title: Drift Detection +sidebar_position: 2 +hide_title: true +--- + +# Drift Detection + +## Detect drifts without plan or apply + +We can set `.spec.approvePlan` to `disable` to tell the controller to detect drifts of your +Terraform resources only. Doing so will skip the `plan` and `apply` stages. + +```yaml hl_lines="7" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + approvePlan: disable + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +--- + +## Disable drift detection + +Drift detection is enabled by default. You can set `.spec.disableDriftDetection: true` to disable it. 
+ 
+```yaml hl_lines="8"
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: helloworld
+  namespace: flux-system
+spec:
+  approvePlan: auto
+  disableDriftDetection: true
+  interval: 1m
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: helloworld
+    namespace: flux-system
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/modules.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/modules.mdx
new file mode 100644
index 0000000000..e9d5bfffda
--- /dev/null
+++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/modules.mdx
@@ -0,0 +1,68 @@
+---
+title: Primitive Modules
+sidebar_position: 2
+hide_title: true
+---
+
+# Primitive Modules
+
+This document describes how to use the Weave TF-controller with a primitive module.
+It requires TF-controller v0.13+ to run the example.
+
+## What is a primitive module?
+
+It's a Terraform module that contains only a single resource.
+
+ * A Terraform primitive module must contain the "values" variable.
+ * The "values" variable must be an object with fields of optional types.
+ * The module must be placed under a directory, which is named after the resource.
+ * The directory can optionally contain other files, for example the .terraform.lock.hcl.
+ * We call a set of primitive modules bundled into an OCI image, a package.
+
+## Hello World Primitive Module
+
+Here is an example of how a primitive module can be defined in YAML.
+Assume that we have a ready-to-use OCI image with a primitive module for the imaginary resource `aws_hello_world`,
+and the image is tagged as `ghcr.io/tf-controller/hello-primitive-modules/v4.32.0:v1`.
+
+We'll use the following Terraform object definition to provision the resource.
+
+1. We need to create a Terraform object with the `spec.sourceRef.kind` field
+set to `OCIRepository` and the `spec.sourceRef.name` field set to the name of the OCIRepository object.
+
+2. 
We need to set the `spec.path` field to the name of the resource, in this case `aws_hello_world`. + +3. We need to set the `spec.values` field to the values of the resource. This is a YAML object that +will be converted to an HCL variable, and passed to the Terraform module. + +4. We need to set the `spec.approvePlan` field to `auto` to automatically approve the plan. + +```yaml hl_lines="19-25" +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: hello-package-v4.32.0 + namespace: flux-system +spec: + interval: 30s + url: oci://ghcr.io/tf-controller/hello-primitive-modules/v4.32.0 + ref: + tag: v1 +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + path: aws_hello_world + values: + greeting: Hi + subject: my world + sourceRef: + kind: OCIRepository + name: hello-package-v4.32.0 + interval: 1h0m + approvePlan: auto +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/output-data.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/output-data.mdx new file mode 100644 index 0000000000..21f53f2676 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/output-data.mdx @@ -0,0 +1,85 @@ +--- +title: Output Data +sidebar_position: 2 +hide_title: true +--- + +# Output Data + +Outputs created by Terraform can be written to a secret using `.spec.writeOutputsToSecret`. + +## Write all outputs + +We can specify a target secret in `.spec.writeOutputsToSecret.name`, and the controller will write all outputs to the secret by default. 
+ 
+```yaml hl_lines="14-15"
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: helloworld
+  namespace: flux-system
+spec:
+  approvePlan: auto
+  interval: 1m
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: helloworld
+    namespace: flux-system
+  writeOutputsToSecret:
+    name: helloworld-output
+```
+
+## Write outputs selectively
+
+We can choose only a subset of outputs by specifying the output names we'd like to write in the `.spec.writeOutputsToSecret.outputs` array.
+
+```yaml hl_lines="16-18"
+apiVersion: infra.contrib.fluxcd.io/v1alpha1
+kind: Terraform
+metadata:
+  name: helloworld
+  namespace: flux-system
+spec:
+  approvePlan: auto
+  interval: 1m
+  path: ./
+  sourceRef:
+    kind: GitRepository
+    name: helloworld
+    namespace: flux-system
+  writeOutputsToSecret:
+    name: helloworld-output
+    outputs:
+    - hello_world
+    - my_sensitive_data
+```
+
+## Rename outputs
+
+Sometimes we'd like to rename an output, so that it can be consumed by other Kubernetes controllers.
+For example, we might retrieve a key from a Secret manager, and it's an AGE key, which must end with ".agekey" in the secret. In this case, we need to rename the output.
+
+TF-controller supports mapping output names using the `old_name:new_name` format.
+
+In the following example, we renamed the `age_key` output as the `age.agekey` entry for the `helloworld-output` Secret's data, so that other components in the GitOps pipeline could consume it.
+ +```yaml hl_lines="16-17" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + writeOutputsToSecret: + name: helloworld-output + outputs: + - age_key:age.agekey +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/provision.mdx b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/provision.mdx new file mode 100644 index 0000000000..628983f56b --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/Using Terraform CRD/provision.mdx @@ -0,0 +1,228 @@ +--- +title: Provision TF Resources +sidebar_position: 1 +hide_title: true +--- + +# Provision Terraform Resources + +## Set variables for resources + +:::warning +**BREAKING CHANGE**: This is a breaking change of the `v1alpha1` API. +::: + +Users who are upgrading from TF-controller <= 0.7.0 require updating `varsFrom`, +from a single object: + +```yaml hl_lines="2" + varsFrom: + kind: ConfigMap + name: cluster-config +``` + +to be an array of objects, like this: + +```yaml hl_lines="2" + varsFrom: + - kind: ConfigMap + name: cluster-config +``` + +--- + +### **vars** and **varsFrom** + +You can pass variables to Terraform using the `vars` and `varsFrom` fields. + +Inline variables can be set using `vars`. The `varsFrom` field accepts a list of ConfigMaps / Secrets. +You may use the `varsKeys` property of `varsFrom` to select specific keys from the input or omit this field +to select all keys from the input source. + +Note that in the case of the same variable key being passed multiple times, the controller will use +the last instance of the key passed to `varsFrom`.
+ +```yaml hl_lines="15-20 22-28" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + vars: + - name: region + value: us-east-1 + - name: env + value: dev + - name: instanceType + value: t3-small + varsFrom: + - kind: ConfigMap + name: cluster-config + varsKeys: + - nodeCount + - instanceType + - kind: Secret + name: cluster-creds +``` + +### Variable value as HCL + +The `vars` field supports HCL string, number, bool, object and list types. For example, the following variable can be populated using the accompanying Terraform spec: + +```hcl hl_lines="3-6" +variable "cluster_spec" { + type = object({ + region = string + env = string + node_count = number + public = bool + }) +} +``` + +```yaml hl_lines="17-20" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system + vars: + - name: cluster_spec + value: + region: us-east-1 + env: dev + node_count: 10 + public: false +``` + +## Auto approve resources + +To provision resources with TF-controller, you need to create a `Terraform` object and a Flux source object, +such as a `GitRepository` or `OCIRepository` object. + +### Create a Terraform object + +The `Terraform` object is a Kubernetes custom resource definition (CRD) object. +It is the core object of TF-controller. + +It defines the Terraform module, the backend configuration, and the GitOps automation mode. + +The Terraform module is a Terraform configuration that can be used to provision resources. +It can be placed inside a Git repository, or packaged as an OCI image in an OCI registry. 
+ +The backend configuration is the configuration for the Terraform backend to be used to store the Terraform state. +It is optional. If not specified, the Kubernetes backend will be used by default. + +### GitOps automation mode + +The GitOps automation mode determines how the controller runs the Terraform module. +It is optional. If not specified, the "plan-and-manually-apply" mode will be used by default. +In this example, we use the "auto-apply" mode. + +The following is an example of a `Terraform` object: + +```yaml hl_lines="8" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld +spec: + path: ./helloworld + interval: 10m + approvePlan: auto + sourceRef: + kind: GitRepository + name: helloworld +``` + +## Manually apply resources + +Assume that you have a `GitRepository` object named `helloworld` pointing to a Git repository, and you want to plan and apply the Terraform resources under `./` of that Git repo. + +For the plan & manual approval workflow, please start by either setting `.spec.approvePlan` to be the blank value, or omitting the field. + +```yaml hl_lines="7" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: "" # or you can omit this field + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +Then after a reconciliation loop, the controller will generate a plan, and tell you how to use the field `.spec.approvePlan` to approve the plan. +You can run the following command to obtain that message. + +```bash +kubectl -n flux-system get tf/helloworld +``` + +After you make the change and push it, the controller will apply the plan to create real resources.
+ +```yaml hl_lines="7" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: hello-world + namespace: flux-system +spec: + approvePlan: plan-main-b8e362c206 # first 8 digits of a commit hash is enough + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +## Destroying resources + +The resources provisioned by a Terraform object are not destroyed by default, and the tfstate of that Terraform object still remains in the cluster. + +It means that you are safe to delete the Terraform object in the cluster and re-create it. +If you re-create a new Terraform object with the same name, namespace and workspace, it will continue to use the tfstate inside the cluster as the starting point to reconcile. + +However, in many scenarios you may want to destroy the provisioned resources when deleting the Terraform object. +To enable destroying resources on object deletion, set `.spec.destroyResourcesOnDeletion` to `true`. + +~> **WARNING:** This feature will destroy your resources on the cloud if the Terraform object gets deleted. Please use it with caution.
+ +```yaml hl_lines="8" +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + approvePlan: auto + destroyResourcesOnDeletion: true + interval: 1m + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/_category_.json b/website/versioned_docs/version-0.12.0/terraform/_category_.json new file mode 100644 index 0000000000..7b9baf1891 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Terraform", + "position": 5 +} + \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/aws-eks.mdx b/website/versioned_docs/version-0.12.0/terraform/aws-eks.mdx new file mode 100644 index 0000000000..9f866e5e91 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/aws-eks.mdx @@ -0,0 +1,38 @@ +--- +title: Configure AWS EKS +sidebar_position: 3 +hide_title: true +--- + +# Configure for AWS Elastic Kubernetes Service (EKS) + +AWS Elastic Kubernetes Service (EKS) offers IAM Roles for Service Accounts (IRSA) as a mechanism by which to provide +credentials to Kubernetes pods. This can be used to provide the required AWS credentials to Terraform runners +for performing plans and applies. + +You can use `eksctl` to associate an OIDC provider with your EKS cluster, for example: + +```shell +eksctl utils associate-iam-oidc-provider --cluster CLUSTER_NAME --approve +``` + +Then follow the instructions [here](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html) +to add a trust policy to the IAM role which grants the necessary permissions for Terraform. +Please note that if you have installed the controller following the README, then the `namespace:serviceaccountname` +will be `flux-system:tf-runner`. 
You'll obtain a Role ARN to use in the next step. + +Finally, annotate the ServiceAccount for the `tf-runner` with the obtained Role ARN in your cluster: + +```shell +kubectl annotate -n flux-system serviceaccount tf-runner eks.amazonaws.com/role-arn=ROLE_ARN +``` + +If deploying the `tf-controller` via Helm, this can be accomplished as follows: + +```yaml hl_lines="5" +values: + runner: + serviceAccount: + annotations: + eks.amazonaws.com/role-arn: ROLE_ARN +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/backup-and-restore.mdx b/website/versioned_docs/version-0.12.0/terraform/backup-and-restore.mdx new file mode 100644 index 0000000000..6f4aedd2b9 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/backup-and-restore.mdx @@ -0,0 +1,49 @@ +--- +title: Backup and Restore State +sidebar_position: 2 +hide_title: true +--- + +# Backup and restore Terraform state + +## Backup the tfstate + +Assume that we have the `my-stack` Terraform object with its `.spec.workspace` set to "default". 
+ +```bash +kubectl get terraform + +NAME READY STATUS AGE +my-stack Unknown Initializing 28s +``` + +We can backup its tfstate out of the cluster, like this: + +```bash +WORKSPACE=default +NAME=my-stack + +kubectl get secret tfstate-${WORKSPACE}-${NAME} \ + -ojsonpath='{.data.tfstate}' \ + | base64 -d | gzip -d > terraform.tfstate +``` + +## Restore the tfstate + +To restore the tfstate file or import an existing tfstate file to the cluster, we can use the following operation: + +```bash +gzip terraform.tfstate + +WORKSPACE=default +NAME=my-stack + +kubectl create secret \ + generic tfstate-${WORKSPACE}-${NAME} \ + --from-file=tfstate=terraform.tfstate.gz \ + --dry-run=client -o=yaml \ + | yq e '.metadata.annotations["encoding"]="gzip"' - \ + > tfstate-${WORKSPACE}-${NAME}.yaml + +kubectl apply -f tfstate-${WORKSPACE}-${NAME}.yaml +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/get-started.mdx b/website/versioned_docs/version-0.12.0/terraform/get-started.mdx new file mode 100644 index 0000000000..d257ae369d --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/get-started.mdx @@ -0,0 +1,104 @@ +--- +title: Get Started +sidebar_position: 1 +hide_title: true +--- + +# Get Started with the Terraform Controller + +## Preflight Checks + +Here are the requirements you need to set up before you start: + + 1. Flux v0.32.0 or later (not only the CLI, but also the controllers on the cluster). If you are not sure about the Flux version on your cluster, please re-bootstrap your cluster. + 2. TF-controller uses **the Controller/Runner architecture**. The Controller acts as a client, and talks to each Runner's Pod via gRPC. Please make sure + 1. **Each Runner's Pod in each Namespace** is allowed to open, and serve at **port 30000** (the gRPC port of a Runner), and the Controller can connect to it. + 2. **The Controller** needs to download tar.gz BLOBs from the **Source controller** via **port 80**. + 3. 
**The Controller** needs to post the events to the **Notification controller** via **port 80**. + +## Installation + +Before using TF-controller, you have to install Flux by using either `flux install` or `flux bootstrap` command. +Please note that TF-controller now requires **Flux v0.32.0** or later, so please make sure you have the latest version of Flux. +After that you can install TF-controller with Flux HelmRelease by: + +```shell +kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml +``` + +For the most recent release candidate of TF-controller, please use [rc.yaml](https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml). + +```shell +kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml +``` + +or manually with Helm by: + +```shell +# Add tf-controller helm repository +helm repo add tf-controller https://weaveworks.github.io/tf-controller/ + +# Install tf-controller +helm upgrade -i tf-controller tf-controller/tf-controller \ + --namespace flux-system +``` + +For details on configurable parameters of the TF-controller chart, +please see [chart readme](https://github.com/weaveworks/tf-controller/tree/main/charts/tf-controller#tf-controller-for-flux). + +Alternatively, you can install TF-controller via `kubectl`: + +```shell +export TF_CON_VER=v0.12.0 +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.crds.yaml +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.rbac.yaml +kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.deployment.yaml +``` + +## Quick start + +Here's a simple example of how to GitOps your Terraform resources with TF-controller and Flux. 
+ +### Define source + +First, we need to define a Source controller's source (`GitRepository`, `Bucket`, `OCIRepository`), for example: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: GitRepository +metadata: + name: helloworld + namespace: flux-system +spec: + interval: 30s + url: https://github.com/tf-controller/helloworld + ref: + branch: main +``` + +### The GitOps Automation mode + +The GitOps automation mode could be enabled by setting `.spec.approvePlan=auto`. In this mode, Terraform resources will be planned, +and automatically applied for you. + +```yaml +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld + namespace: flux-system +spec: + interval: 1m + approvePlan: auto + path: ./ + sourceRef: + kind: GitRepository + name: helloworld + namespace: flux-system +``` + +For a full list of features and how to use them, please follow the [terraform section](../overview) in our docs. + +## Other Examples + * A Terraform GitOps with Flux to automatically reconcile your [AWS IAM Policies](https://github.com/tf-controller/aws-iam-policies). + * GitOps an existing EKS cluster, by partially import its nodegroup and manage it with TF-controller: [An EKS scaling example](https://github.com/tf-controller/eks-scaling). \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/oci-artifact.mdx b/website/versioned_docs/version-0.12.0/terraform/oci-artifact.mdx new file mode 100644 index 0000000000..3402a87370 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/oci-artifact.mdx @@ -0,0 +1,52 @@ +--- +title: OCI Artifact +sidebar_position: 2 +hide_title: true +--- + +# OCI Artifact as Source + +To use OCI artifacts as the source of Terraform objects, you need Flux 2 version **v0.32.0** or higher. 
+ +Assuming that you have Terraform files (your root module may contain sub-modules) under ./modules, +you can use Flux CLI to create an OCI artifact for your Terraform modules +by running the following commands: + +```bash +flux push artifact oci://ghcr.io/tf-controller/helloworld:$(git rev-parse --short HEAD) \ + --path="./modules" \ + --source="$(git config --get remote.origin.url)" \ + --revision="$(git branch --show-current)/$(git rev-parse HEAD)" + +flux tag artifact oci://ghcr.io/tf-controller/helloworld:$(git rev-parse --short HEAD) \ + --tag main +``` + +Then you define a source (`OCIRepository`), and use it as the `sourceRef` of your Terraform object. + +```yaml hl_lines="5 20-22" +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: helloworld-oci +spec: + interval: 1m + url: oci://ghcr.io/tf-controller/helloworld + ref: + tag: main +--- +apiVersion: infra.contrib.fluxcd.io/v1alpha1 +kind: Terraform +metadata: + name: helloworld-tf-oci +spec: + path: ./ + approvePlan: auto + interval: 1m + sourceRef: + kind: OCIRepository + name: helloworld-oci + writeOutputsToSecret: + name: helloworld-outputs +``` diff --git a/website/versioned_docs/version-0.12.0/terraform/overview.mdx b/website/versioned_docs/version-0.12.0/terraform/overview.mdx new file mode 100644 index 0000000000..ac8423ad6a --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/overview.mdx @@ -0,0 +1,65 @@ +--- +title: Overview +sidebar_position: 0 +hide_title: true +--- + +# Overview + +TF-controller is a controller for Weave GitOps that follows the patterns established +by [Flux](https://fluxcd.io) to reconcile Terraform resources in the GitOps way. +With the power of Weave GitOps together with Terraform, the tf-controller allows you to GitOps-ify infrastructure, +and application resources, in the Kubernetes and Terraform universe, at your own pace. + +"At your own pace" means you don't need to GitOps-ify everything at once. 
+ +TF-controller offers many GitOps models: + + 1. **GitOps Automation Model:** GitOps your Terraform resources from the provision steps to the enforcement steps, like a whole EKS cluster. + 2. **Hybrid GitOps Automation Model:** GitOps parts of your existing infrastructure resources. For example, you have an existing EKS cluster. + You can choose to GitOps only its nodegroup, or its security group. + 3. **State Enforcement Model:** You have a TFSTATE file, and you'd like to use GitOps to enforce it, without changing anything else. + 4. **Drift Detection Model:** You have a TFSTATE file, and you'd like to use GitOps just for drift detection, so you can decide to do things later when a drift occurs. + +To get started, follow the [getting started](../get-started) guide. + +## Features + + * **Multi-Tenancy**: TF-controller supports multi-tenancy by running Terraform `plan` and `apply` inside Runner Pods. + When specifying `.metadata.namespace` and `.spec.serviceAccountName`, the Runner Pod uses the specified ServiceAccount + and runs inside the specified Namespace. These settings enable the soft multi-tenancy model, which can be used within + the Flux multi-tenancy setup. _This feature is available since v0.9.0._ + * **GitOps Automation for Terraform**: With setting `.spec.approvePlan=auto`, it allows a `Terraform` object + to be reconciled and act as the representation of your Terraform resources. The TF-controller uses the spec of + the `Terraform` object to perform `plan` and `apply` on its associated Terraform resources. It then stores + the `TFSTATE` of the applied resources as a `Secret` inside the Kubernetes cluster. After `.spec.interval` passes, + the controller performs drift detection to check whether a drift has occurred between your live system + and your Terraform resources. If a drift occurs, the plan to fix that drift will be generated and applied automatically.
+ _This feature is available since v0.3.0._ + * **Drift detection**: This feature is a part of the GitOps automation feature. The controller detects and fixes drift + for your infrastructures, based on the Terraform resources and their `TFSTATE`. _This feature is available since v0.5.0._ + * Drift detection is enabled by default. You can use the field `.spec.disableDriftDetection` to disable this behaviour. + _This feature is available since v0.7.0._ + * The Drift detection only mode, without plan or apply steps, allows you to perform read-only drift detection. + _This feature is available since v0.8.0._ + * **Plan and Manual Approve**: This feature allows you to separate the `plan` step from the `apply` step, just like + the Terraform workflow you are familiar with. A good thing about this is that it is done in a GitOps way. When a plan + is generated, the controller shows you a message like **'set approvePlan: "plan-main-123" to apply this plan.'**. + You make a change to the field `.spec.approvePlan`, commit and push to tell the TF-controller to apply the plan for you. + With this GitOps workflow, you can optionally create and push this change to a new branch for your team members to + review and approve too. _This feature is available since v0.6.0._ + * **First-class YAML-based Terraform**: The `Terraform` object in v0.13.0+ allows you to better configure your + Terraform resources via YAMLs, but without introducing any extra CRDs to your cluster. Together with a new generator + called **Tofu-Jet**, we'll now be able to ship pre-generated primitive Terraform modules for all major cloud providers. + A primitive Terraform module is a module that only contains a single primitive resource, like `aws_iam_role`, or `aws_iam_policy`. + With this concept, we would be able to use Terraform without writing Terraform code, and make it more GitOps-friendly at the same time.
+ _This feature is available since v0.13.0._ + * **Enterprise Dashboard Support:** with Weave GitOps Enterprise v0.9.6 you are now able to manage `Terraform` objects the same way you can + with `Kustomization` and `HelmReleases`. + +## Dependencies + +| Version | Terraform | Source Controller | Flux v2 | +|:-----------:|:---------:|:-----------------:|:-------:| +| v0.13.0-rc | v1.3.1 | v0.30.0 | v0.35.x | +| **v0.12.0** | v1.1.9 | v0.26.1 | v0.32.x | diff --git a/website/versioned_docs/version-0.12.0/terraform/terraform-enterprise.mdx b/website/versioned_docs/version-0.12.0/terraform/terraform-enterprise.mdx new file mode 100644 index 0000000000..61d53585e2 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/terraform-enterprise.mdx @@ -0,0 +1,43 @@ +--- +title: Terraform Enterprise +sidebar_position: 3 +hide_title: true +--- + +# Terraform Enterprise + +## Terraform Enterprise Integration + +Starting from v0.9.5, Weave GitOps tf-controller officially supports integration with **Terraform Cloud (TFC)** and +**Terraform Enterprise (TFE)**. Here are the steps to set up tf-controller for your TFE instance. + +![](./tfe_integration_01.png) + +### Terraform Login + +First, you need to obtain an API token from your TFE. You can use the `terraform login` command to do so. + +```shell +terraform login tfe.dev.example.com +``` + +Then you can find your API token inside `$HOME/.terraform.d/credentials.tfrc.json`. +Content of the file will look like this: + +```json +{ + "credentials": { + "tfe.dev.example.com": { + "token": "mXXXXXXXXX.atlasv1.ixXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + } + } +} +``` + +### Prepare a TFRC file +TF-controller accepts a TFRC file in the HCL format. So you have to prepare a `terraform.tfrc` file using the contents from above.
+```hcl +credentials "tfe.dev.example.com" { + token = "mXXXXXXXXX.atlasv1.ixXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" +} +``` \ No newline at end of file diff --git a/website/versioned_docs/version-0.12.0/terraform/tfctl.mdx b/website/versioned_docs/version-0.12.0/terraform/tfctl.mdx new file mode 100644 index 0000000000..f57bdbc608 --- /dev/null +++ b/website/versioned_docs/version-0.12.0/terraform/tfctl.mdx @@ -0,0 +1,44 @@ +--- +title: Terraform Controller CLI +sidebar_position: 3 +hide_title: true +--- + +# Terraform Controller CLI + +`tfctl` is a command-line utility to help with tf-controller operations. + +:::note +We are planning on migrating these features into the Weave GitOps CLI. +::: + +## Installation + +You can download the `tfctl` binary via the GitHub releases page: [https://github.com/weaveworks/tf-controller/releases](https://github.com/weaveworks/tf-controller/releases) + +``` +Usage: + tfctl [command] + +Available Commands: + completion Generate the autocompletion script for the specified shell + create Create a Terraform resource + delete Delete a Terraform resource + get Get Terraform resources + help Help about any command + install Install the tf-controller + plan Plan a Terraform configuration + reconcile Trigger a reconcile of the provided resource + resume Resume reconciliation for the provided resource + suspend Suspend reconciliation for the provided resource + uninstall Uninstall the tf-controller + version Prints tf-controller and tfctl version information + +Flags: + -h, --help help for tfctl + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -n, --namespace string The kubernetes namespace to use for CLI requests. (default "flux-system") + --terraform string The location of the terraform binary. (default "/usr/bin/terraform") + +Use "tfctl [command] --help" for more information about a command. 
+``` diff --git a/website/versioned_docs/version-0.12.0/terraform/tfe_integration_01.png b/website/versioned_docs/version-0.12.0/terraform/tfe_integration_01.png new file mode 100644 index 0000000000..bf007cacdc Binary files /dev/null and b/website/versioned_docs/version-0.12.0/terraform/tfe_integration_01.png differ diff --git a/website/versioned_sidebars/version-0.12.0-sidebars.json b/website/versioned_sidebars/version-0.12.0-sidebars.json new file mode 100644 index 0000000000..caea0c03ba --- /dev/null +++ b/website/versioned_sidebars/version-0.12.0-sidebars.json @@ -0,0 +1,8 @@ +{ + "tutorialSidebar": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/website/versions.json b/website/versions.json index 0dcfb3a67e..4901e60151 100644 --- a/website/versions.json +++ b/website/versions.json @@ -1,4 +1,5 @@ [ + "0.12.0", "0.11.0", "0.10.2", "0.10.1",