Skip to content

Commit

Permalink
Update
Browse files Browse the repository at this point in the history
Signed-off-by: Yi Chen <[email protected]>
  • Loading branch information
ChenYi015 committed Jul 9, 2024
1 parent 71a7ed7 commit 1a27f89
Show file tree
Hide file tree
Showing 54 changed files with 1,108 additions and 1,127 deletions.
18 changes: 14 additions & 4 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -1,21 +1,31 @@
.github/
.idea/
.vscode/
bin/
charts/
docs/
config/
examples/
hack/
manifest/
sparkctl/sparkctl
sparkctl/sparkctl-linux-amd64
sparkctl/sparkctl-darwin-amd64
spark-docker/
sparkctl/
test/
vendor/
.dockerignore
.DS_Store
.gitignore
.gitlab-ci.yaml
.golangci.yaml
.pre-commit-config.yaml
ADOPTERS.md
CODE_OF_CONDUCT.md
codecov.yaml
CONTRIBUTING.md
cover.out
Dockerfile
LICENSE
OWNERS
PROJECT
README.md
spark-operator
test.sh
4 changes: 2 additions & 2 deletions .golangci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,14 @@ linters-settings:
- default
- prefix(github.com/kubeflow/spark-operator)
depguard:
main:
Main:
files:
- $all
- "!$test"
listMode: Lax
deny:
reflect: Please don't use reflect package
test:
Test:
files:
- $test
listMode: Lax
Expand Down
27 changes: 9 additions & 18 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -16,35 +16,26 @@

ARG SPARK_IMAGE=spark:3.5.0

FROM golang:1.22-alpine as builder
FROM golang:1.22.0 as builder

WORKDIR /workspace

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
COPY . .

# Copy the go source code
COPY api/ api/
COPY cmd/ cmd/
COPY internal/ internal/
COPY pkg/ pkg/

# Build
ARG TARGETARCH
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o /usr/bin/spark-operator cmd/main.go

RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on make build-operator

FROM ${SPARK_IMAGE}

USER root
COPY --from=builder /usr/bin/spark-operator /usr/bin/
RUN apt-get update --allow-releaseinfo-change \
&& apt-get update \

RUN apt-get update \
&& apt-get install -y tini \
&& rm -rf /var/lib/apt/lists/*

COPY --from=builder /workspace/bin/spark-operator /usr/bin/spark-operator

COPY entrypoint.sh /usr/bin/

ENTRYPOINT ["/usr/bin/entrypoint.sh"]
39 changes: 27 additions & 12 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,20 +12,30 @@ endif
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

# Version information.
# OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}')
VERSION=v2.0.0
BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%S%:z")
GIT_COMMIT = $(shell git rev-parse HEAD)
GIT_TAG = $(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
GIT_TREE_STATE = $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
GIT_SHA = $(shell git rev-parse --short HEAD || echo "HEAD")
GIT_VERSION = ${VERSION}-${GIT_SHA}

REPO=github.com/kubeflow/spark-operator
SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator
SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart
OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}')
DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'`
BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'`
UNAME:=`uname | tr '[:upper:]' '[:lower:]'`
REPO=github.com/kubeflow/spark-operator
SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart

# CONTAINER_TOOL defines the container tool to be used for building images.
# Be aware that the target commands are only tested with Docker which is
# scaffolded by default. However, you might want to replace it to use other
# tools. (i.e. podman)
CONTAINER_TOOL ?= docker

OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}')
# Image URL to use all building/pushing image targets
IMAGE_REPOSITORY ?= docker.io/kubeflow/spark-operator
IMAGE_TAG ?= $(OPERATOR_VERSION)
Expand Down Expand Up @@ -93,13 +103,14 @@ go-vet: ## Run go vet against code.

.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
@echo "Running golangci-lint run..."
$(GOLANGCI_LINT) run

.PHONY: lint-fix
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
@echo "Running golangci-lint run --fix..."
$(GOLANGCI_LINT) run --fix


.PHONY: unit-test
unit-test: envtest ## Run unit tests.
@echo "Running unit tests..."
Expand All @@ -114,9 +125,16 @@ e2e-test: envtest kind-create-cluster ## Run the e2e tests against a Kind k8s in

##@ Build

override LDFLAGS += \
-X ${REPO}.version=${VERSION} \
-X ${REPO}.buildDate=${BUILD_DATE} \
-X ${REPO}.gitCommit=${GIT_COMMIT} \
-X ${REPO}.gitTreeState=${GIT_TREE_STATE} \
-extldflags "-static"

.PHONY: build-operator
build-operator: ## Build Spark operator
go build -o bin/spark-operator cmd/main.go
go build -o bin/spark-operator -ldflags '${LDFLAGS}' cmd/main.go

.PHONY: build-sparkctl
build-sparkctl: ## Build sparkctl binary
Expand Down Expand Up @@ -171,16 +189,13 @@ docker-push: ## Push docker image with the operator.
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
PLATFORMS ?= linux/arm64,linux/amd64
PLATFORMS ?= linux/amd64,linux/arm64
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the operator for cross-platform support
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- $(CONTAINER_TOOL) buildx create --name spark-operator-builder
$(CONTAINER_TOOL) buildx use spark-operator-builder
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE_REPOSITORY}:${IMAGE_TAG} -f Dockerfile.cross .
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE_REPOSITORY}:${IMAGE_TAG} -f Dockerfile .
- $(CONTAINER_TOOL) buildx rm spark-operator-builder
rm Dockerfile.cross

##@ Helm

Expand Down Expand Up @@ -222,11 +237,11 @@ kind-delete-custer: kind ## Delete the created kind cluster.
rm -f $(KIND_KUBE_CONFIG)

.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
install-crd: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -

.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
uninstall-crd: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
Expand Down
8 changes: 4 additions & 4 deletions charts/spark-operator-chart/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
| commonLabels | object | `{}` | Common labels to add to the resources |
| controller.ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `controller.uiService.enable` to true. |
| controller.logLevel | int | `1` | Set higher levels for more verbose logging |
| controller.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error` |
| controller.rbac.annotations | object | `{}` | Optional annotations for the controller RBAC resources |
| controller.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the controller |
| controller.replicaCount | int | `1` | Number of replicas of controller, leader election will be enabled if this is greater than 1 |
Expand Down Expand Up @@ -111,9 +111,9 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
| prometheus.metrics.enable | bool | `true` | Specifies whether to enable prometheus metrics scraping |
| prometheus.metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
| prometheus.metrics.port | int | `10254` | Metrics port |
| prometheus.metrics.port | int | `8080` | Metrics port |
| prometheus.metrics.portName | string | `"metrics"` | Metrics port name |
| prometheus.metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
| prometheus.metrics.prefix | string | `""` | Metrics prefix, will be added to all exported metrics |
| prometheus.podMonitor.create | bool | `false` | Specifies whether to create pod monitor. Note that prometheus metrics should be enabled as well. |
| prometheus.podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
| prometheus.podMonitor.labels | object | `{}` | Pod monitor labels |
Expand All @@ -133,7 +133,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
| volumes | list | `[]` | |
| webhook.enable | bool | `true` | Specifies whether to enable webhook server |
| webhook.failurePolicy | string | `"Fail"` | Specifies how unrecognized errors are handled, allowed values are `Ignore` or `Fail`. |
| webhook.logLevel | int | `1` | Set higher levels for more verbose logging |
| webhook.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error` |
| webhook.port | int | `9443` | Specifies webhook port |
| webhook.portName | string | `"webhook"` | Specifies webhook service port name |
| webhook.rbac.annotations | object | `{}` | Optional annotations for the webhook RBAC resources |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,10 +69,10 @@ spec:
- --enable-batch-scheduler={{ .Values.batchScheduler.enable }}
{{- if .Values.prometheus.metrics.enable }}
- --enable-metrics=true
- --metrics-labels=app_type
- --metrics-port={{ .Values.prometheus.metrics.port }}
- --metrics-bind-address=:{{ .Values.prometheus.metrics.port }}
- --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }}
- --metrics-prefix={{ .Values.prometheus.metrics.prefix }}
- --metrics-labels=app_type
{{- end }}
- --leader-election=true
- --leader-election-lock-name={{ include "spark-operator.controller.leaderElectionName" . }}
Expand Down
51 changes: 43 additions & 8 deletions charts/spark-operator-chart/templates/controller/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,28 +30,45 @@ rules:
- ""
resources:
- pods
- persistentvolumeclaims
verbs:
- "*"
- get
- list
- watch
- create
- update
- patch
- delete
- deletecollection
- apiGroups:
- ""
resources:
- services
- configmaps
verbs:
- create
- get
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- create
- delete
- apiGroups:
- ""
Expand Down Expand Up @@ -85,13 +102,31 @@ rules:
- sparkoperator.k8s.io
resources:
- sparkapplications
- sparkapplications/status
- sparkapplications/finalizers
- scheduledsparkapplications
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- sparkoperator.k8s.io
resources:
- sparkapplications/status
- scheduledsparkapplications/status
verbs:
- get
- update
- patch
- apiGroups:
- sparkoperator.k8s.io
resources:
- sparkapplications/finalizers
- scheduledsparkapplications/finalizers
verbs:
- "*"
- update
{{- if .Values.batchScheduler.enable }}
{{/* required for the `volcano` batch scheduler */}}
- apiGroups:
Expand Down
11 changes: 9 additions & 2 deletions charts/spark-operator-chart/templates/spark/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,18 @@ rules:
- ""
resources:
- pods
- services
- configmaps
- persistentvolumeclaims
- services
verbs:
- "*"
- get
- list
- watch
- create
- update
- patch
- delete
- deletecollection

---
apiVersion: rbac.authorization.k8s.io/v1
Expand Down
9 changes: 9 additions & 0 deletions charts/spark-operator-chart/templates/webhook/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,15 @@ spec:
- --webhook-svc-name={{ include "spark-operator.webhook.serviceName" . }}
- --webhook-svc-namespace={{ .Release.Namespace }}
- --webhook-port={{ .Values.webhook.port }}
- --mutating-webhook-name={{ include "spark-operator.webhook.name" . }}
- --validating-webhook-name={{ include "spark-operator.webhook.name" . }}
{{- if .Values.prometheus.metrics.enable }}
- --enable-metrics=true
- --metrics-bind-address=:{{ .Values.prometheus.metrics.port }}
- --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }}
- --metrics-prefix={{ .Values.prometheus.metrics.prefix }}
- --metrics-labels=app_type
{{- end }}
- --leader-election=true
- --leader-election-lock-name={{ include "spark-operator.webhook.leaderElectionName" . }}
- --leader-election-lock-namespace={{ .Release.Namespace }}
Expand Down
2 changes: 1 addition & 1 deletion charts/spark-operator-chart/templates/webhook/secret.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ metadata:
labels:
{{- include "spark-operator.webhook.labels" . | nindent 4 }}
annotations:
helm.sh/hook: pre-install
helm.sh/hook: pre-install,pre-upgrade,pre-rollback
helm.sh/hook-delete-policy: before-hook-creation
data:
ca-key.pem: ""
Expand Down
Loading

0 comments on commit 1a27f89

Please sign in to comment.