add gcore provider for webhook

This commit is contained in:
maxim 2022-03-24 13:04:26 +03:00
parent be0eb944c4
commit 49bc4803fd
30 changed files with 2105 additions and 635 deletions


@ -1,7 +1,7 @@
OS ?= $(shell go env GOOS)
ARCH ?= $(shell go env GOARCH)
IMAGE_NAME := "webhook"
IMAGE_NAME := "ghcr.io/g-core/cert-manager-webhook-gcore"
IMAGE_TAG := "latest"
OUT := $(shell pwd)/_out
@ -9,11 +9,17 @@ OUT := $(shell pwd)/_out
KUBE_VERSION=1.21.2
$(shell mkdir -p "$(OUT)")
export TEST_ASSET_ETCD=_test/kubebuilder/bin/etcd
export TEST_ASSET_KUBE_APISERVER=_test/kubebuilder/bin/kube-apiserver
export TEST_ASSET_KUBECTL=_test/kubebuilder/bin/kubectl
test: _test/kubebuilder
clean:
rm -Rf $(OUT)/kubebuilder
install-tools:
sh ./scripts/fetch-test-binaries.sh
test: clean install-tools _test/kubebuilder
TEST_ASSET_ETCD=_test/kubebuilder/bin/etcd \
TEST_ASSET_KUBE_APISERVER=_test/kubebuilder/bin/kube-apiserver \
TEST_ASSET_KUBECTL=_test/kubebuilder/bin/kubectl \
go test -v .
_test/kubebuilder:
@ -32,6 +38,9 @@ clean-kubebuilder:
build:
docker build -t "$(IMAGE_NAME):$(IMAGE_TAG)" .
push:
docker push "$(IMAGE_NAME):$(IMAGE_TAG)"
.PHONY: rendered-manifest.yaml
rendered-manifest.yaml:
helm template \

16
OWNERS

@ -1,16 +0,0 @@
approvers:
- munnerz
- joshvanl
- meyskens
- wallrj
- jakexks
- maelvls
- irbekrm
reviewers:
- munnerz
- joshvanl
- meyskens
- wallrj
- jakexks
- maelvls
- irbekrm

235
README.md

@ -1,54 +1,221 @@
# ACME webhook example
# ACME Webhook for G-Core
The ACME issuer type supports an optional 'webhook' solver, which can be used
to implement custom DNS01 challenge solving logic.
Table of Contents
=================
This is useful if you need to use cert-manager with a DNS provider that is not
officially supported in cert-manager core.
* [Installation](#installation)
  * [Cert Manager](#cert-manager)
  * [The Webhook](#the-webhook)
* [Issuer](#issuer)
  * [Secret](#secret)
  * [ClusterIssuer](#clusterissuer)
* [Development](#development)
  * [Running the test suite](#running-the-test-suite)
  * [Generate the container image](#generate-the-container-image)
## Why not in core?
## Installation
As the project & adoption has grown, there has been an influx of DNS provider
pull requests to our core codebase. As this number has grown, the test matrix
has become un-maintainable and so, it's not possible for us to certify that
providers work to a sufficient level.
### Cert Manager
By creating this 'interface' between cert-manager and DNS providers, we allow
users to quickly iterate and test out new integrations, and then package
those up themselves as 'extensions' to cert-manager.
Follow the [instructions](https://cert-manager.io/docs/installation/) in the cert-manager documentation to install it within your cluster.
On Kubernetes (>= 1.21), the process is straightforward with the following command:
```bash
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml
```
**NOTE**: Check the cert-manager release notes to verify which [version of cert-manager](https://cert-manager.io/docs/installation/supported-releases/) is supported for your Kubernetes or OpenShift version.
### The Webhook
We can also then provide a standardised 'testing framework', or set of
conformance tests, which allow us to validate that a DNS provider works as
expected.
- Next, install the Helm chart, assuming [Helm v3 is installed](https://helm.sh/docs/intro/install/) on your machine:
```bash
helm install -n cert-manager gcore-webhook ./deploy/helm
```
**NOTE**: The Kubernetes resources used to install the webhook should be deployed within the same namespace as cert-manager.
## Creating your own webhook
- To change one of the values, create a `my-values.yml` file (a sample is sketched below) or set the value(s) using helm's `--set` argument:
```bash
helm install -n cert-manager gcore-webhook --set pod.securePort=8443 ./deploy/helm
```
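For reference, a minimal `my-values.yml` sketch, assuming the default keys shipped in `deploy/helm/values.yaml` (override only what you need):
```yaml
# my-values.yml -- hypothetical override file; keys mirror deploy/helm/values.yaml
image:
  repository: ghcr.io/g-core/cert-manager-webhook-gcore
  tag: latest
pod:
  # serve the webhook on a non-default port
  securePort: 8443
# API group the webhook registers under; must match the ClusterIssuer webhook config
groupName: acme.mycompany.com
```
It can then be applied with `helm install -n cert-manager gcore-webhook -f my-values.yml ./deploy/helm`.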
Webhooks themselves are deployed as Kubernetes API services, in order to allow
administrators to restrict access to webhooks with Kubernetes RBAC.
- To uninstall the webhook:
```bash
$ helm delete gcore-webhook -n cert-manager
```
This is important, as otherwise it'd be possible for anyone with access to your
webhook to complete ACME challenge validations and obtain certificates.
- Alternatively, you can install the webhook using the plain list of Kubernetes resources. The namespace
used to install the resources is `cert-manager`:
```bash
kubectl apply -f deploy/webhook-all.yml --validate=false
```
To make the setup of these webhooks easier, we provide a template repository
that can be used to get started quickly.
## Issuer
### Creating your own repository
In order to communicate with the G-Core DNS provider, we will create a Kubernetes Secret
to store the G-Core permanent API token.
Next, we will define a `ClusterIssuer` containing the information needed to access the ACME Let's Encrypt server
and the DNS provider to be used.
### Secret
- Create your permanent API token by following this guide: https://gcorelabs.com/blog/permanent-api-token-explained/
- Create a `Secret` containing your permanent API token under the `token` key:
```yaml
cat <<EOF > secret.yml
apiVersion: v1
kind: Secret
metadata:
  name: gcore-api-key
type: Opaque
stringData:
  token: permanent_api_token
EOF
```
- Next, deploy it under the namespace where you would like to get your certificate/key signed by the ACME CA Authority
```bash
kubectl apply -f secret.yml -n <NAMESPACE>
```
### ClusterIssuer
- Create a `ClusterIssuer` resource to specify the address of the ACME staging or production server to use.
Add the DNS01 solver config that this webhook will use to communicate with the G-Core DNS API in order to create
or delete the ACME challenge TXT record for your domain (additional optional `config` fields are sketched after the example).
```yaml
cat <<EOF > clusterissuer.yml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # ACME Server
    # prod : https://acme-v02.api.letsencrypt.org/directory
    # staging : https://acme-staging-v02.api.letsencrypt.org/directory
    server: <URL_ACME_SERVER>
    # ACME Email address
    email: <ACME_EMAIL>
    privateKeySecretRef:
      name: letsencrypt-<ENV> # staging or production
    solvers:
      - selector:
          dnsNames:
            - '*.example.com'
        dns01:
          webhook:
            config:
              apiKeySecretRef:
                name: gcore-api-key
                key: token
              production: true
              ttl: 600
            groupName: acme.mycompany.com
            solverName: gcore
EOF
```
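Besides `apiKeySecretRef` and `ttl`, the webhook's `config` stanza accepts a few optional fields defined by `gcoreDNSProviderConfig` in `main.go`; a sketch with all of them spelled out (the values shown are the defaults applied by the webhook, except `apiToken`):
```yaml
config:
  # read the permanent API token from a Kubernetes Secret (recommended)
  apiKeySecretRef:
    name: gcore-api-key
    key: token
  # or set the permanent token inline instead of referencing a Secret
  apiToken: ""
  # base URL for G-Core DNS API requests
  apiUrl: https://api.gcorelabs.com/dns
  # TTL (seconds) for the challenge TXT record
  ttl: 300
  # HTTP client timeout in seconds (0 keeps the SDK default)
  timeout: 0
  # deadline (seconds) for each Present/CleanUp call
  propagationTimeout: 300
```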
- Next, install it on your kubernetes cluster
```bash
kubectl apply -f clusterissuer.yml
```
- Next, for each domain where you need a certificate signed by the Let's Encrypt authority, create the following `Certificate`:
```yaml
cat <<EOF > certificate.yml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-example-com
spec:
  secretName: wildcard-example-com-tls
  renewBefore: 240h
  dnsNames:
    - '*.example.com'
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
EOF
```
- Deploy it
```bash
kubectl apply -f certificate.yml -n <NAMESPACE>
```
- If you have deployed an NGINX Ingress Controller on Kubernetes to route the traffic to your service
and to handle the TLS termination, then deploy the following ingress resource:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
    - hosts:
        - '*.example.com'
      secretName: wildcard-example-com-tls
  rules:
    - host: demo.example.com
      http:
        paths:
          - path: /
            backend:
              serviceName: backend-service
              servicePort: 80
```
- Deploy it
```bash
kubectl apply -f ingress.yml -n <NAMESPACE>
```
**NOTE**: If you prefer to delegate to cert-manager the responsibility of creating the `Certificate` resource, then add to your ingress the annotation described within the documentation, e.g. `certmanager.k8s.io/cluster-issuer: "letsencrypt-prod"`, as sketched below.
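A minimal sketch of that approach, reusing the ingress above (note that recent cert-manager releases use the `cert-manager.io/cluster-issuer` annotation rather than the legacy `certmanager.k8s.io/` prefix):
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
    # cert-manager watches this annotation and creates the Certificate itself
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  tls:
    - hosts:
        - '*.example.com'
      # cert-manager stores the issued key pair in this secret
      secretName: wildcard-example-com-tls
  rules:
    - host: demo.example.com
      http:
        paths:
          - path: /
            backend:
              serviceName: backend-service
              servicePort: 80
```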
## Development
### Running the test suite
All DNS providers **must** run the DNS01 provider conformance testing suite,
else they will have undetermined behaviour when used with cert-manager.
**IMPORTANT**: Use the test suite carefully and do not run it too many times, as the DNS servers could fail and report a message such as `suite.go:62: error waiting for record to be deleted: unexpected error from DNS server: SERVFAIL`
**It is essential that you configure and run the test suite when creating a
DNS01 webhook.**
To test one of your registered domains on G-Core, create a secret manifest using the [example file](./testdata/gcore/apikey.yaml) as a template (its shape is shown below).
Replace the `token` value with your own G-Core permanent API token (`345...`).
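A sketch of `testdata/gcore/apikey.yaml`, with the token replaced by a placeholder:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: gcore-api-token
type: Opaque
stringData:
  # replace with your permanent API token
  token: <YOUR_PERMANENT_API_TOKEN>
```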
An example Go test file has been provided in [main_test.go](https://github.com/jetstack/cert-manager-webhook-example/blob/master/main_test.go).
You can run the test suite with:
Install kube-apiserver and etcd locally using the following bash script:
```bash
$ TEST_ZONE_NAME=example.com. make test
./scripts/fetch-test-binaries.sh
```
The example file has a number of areas you must fill in and replace with your
own options in order for tests to pass.
Now, execute the test suite, passing the domain name to be tested as a parameter:
```bash
TEST_ASSET_ETCD=_out/kubebuilder/bin/etcd \
TEST_ASSET_KUBECTL=_out/kubebuilder/bin/kubectl \
TEST_ASSET_KUBE_APISERVER=_out/kubebuilder/bin/kube-apiserver \
TEST_ZONE_NAME=<YOUR_DOMAIN.NAME>. go test -v .
```
or the following `make` command
```bash
TEST_ZONE_NAME=<YOUR_DOMAIN.NAME> make test
```
**IMPORTANT**: As the G-Core server can be slow to reply, it may be necessary to increase the TTL defined within the `config.json` file. The test can also fail
while the kube-apiserver is still finalizing the deletion of the namespace (`"spec":{"finalizers":["kubernetes"]},"status":{"phase":"Terminating"}}`)
### Generate the container image
- Verify first that you have access to a Docker daemon, for example one running on your Kubernetes or OpenShift cluster ;-)
- Compile the project locally (to check that no Go errors are reported):
```bash
make build
```
**NOTE**: Change `IMAGE_NAME` to point to a container repository where you have push access.
You can also use the `Makefile` to build/push the container image and pass `IMAGE_NAME` and `IMAGE_TAG` as parameters; without `IMAGE_TAG` defined,
docker will tag/push `latest`:
```bash
make build
make push
# or, overriding the image coordinates on the command line:
make build IMAGE_NAME=<YOUR_REGISTRY>/cert-manager-webhook-gcore IMAGE_TAG=<TAG>
make push IMAGE_NAME=<YOUR_REGISTRY>/cert-manager-webhook-gcore IMAGE_TAG=<TAG>
```


@ -1,76 +0,0 @@
---
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "example-webhook.selfSignedIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selfSigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "example-webhook.rootCACertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
secretName: {{ include "example-webhook.rootCACertificate" . }}
duration: 43800h # 5y
issuerRef:
name: {{ include "example-webhook.selfSignedIssuer" . }}
commonName: "ca.example-webhook.cert-manager"
isCA: true
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "example-webhook.rootCAIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ca:
secretName: {{ include "example-webhook.rootCACertificate" . }}
---
# Finally, generate a serving certificate for the webhook to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "example-webhook.servingCertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
secretName: {{ include "example-webhook.servingCertificate" . }}
duration: 8760h # 1y
issuerRef:
name: {{ include "example-webhook.rootCAIssuer" . }}
dnsNames:
- {{ include "example-webhook.fullname" . }}
- {{ include "example-webhook.fullname" . }}.{{ .Release.Namespace }}
- {{ include "example-webhook.fullname" . }}.{{ .Release.Namespace }}.svc


@ -1,90 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "example-webhook.fullname" . }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
---
# Grant the webhook permission to read the ConfigMap containing the Kubernetes
# apiserver's requestheader-ca-certificate.
# This ConfigMap is automatically created by the Kubernetes apiserver.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "example-webhook.fullname" . }}:webhook-authentication-reader
namespace: kube-system
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "example-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
# apiserver gets the auth-delegator role to delegate auth decisions to
# the core apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "example-webhook.fullname" . }}:auth-delegator
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "example-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
# Grant cert-manager permission to validate using our apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "example-webhook.fullname" . }}:domain-solver
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- {{ .Values.groupName }}
resources:
- '*'
verbs:
- 'create'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "example-webhook.fullname" . }}:domain-solver
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "example-webhook.fullname" . }}:domain-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ .Values.certManager.serviceAccountName }}
namespace: {{ .Values.certManager.namespace }}


@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "example-webhook.fullname" . }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: https
protocol: TCP
name: https
selector:
app: {{ include "example-webhook.name" . }}
release: {{ .Release.Name }}


@ -1,43 +0,0 @@
# The GroupName here is used to identify your company or business unit that
# created this webhook.
# For example, this may be "acme.mycompany.com".
# This name will need to be referenced in each Issuer's `webhook` stanza to
# inform cert-manager of where to send ChallengePayload resources in order to
# solve the DNS01 challenge.
# This group name should be **unique**, hence using your own company's domain
# here is recommended.
groupName: acme.mycompany.com
certManager:
namespace: cert-manager
serviceAccountName: cert-manager
image:
repository: mycompany/webhook-image
tag: latest
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 443
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@ -19,3 +19,4 @@
.project
.idea/
*.tmproj
.vscode/


@ -1,5 +1,5 @@
apiVersion: v1
appVersion: "1.0"
appVersion: "0.1.1"
description: A Helm chart for Kubernetes
name: example-webhook
name: gcore-webhook
version: 0.1.0


@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "example-webhook.name" -}}
{{- define "gcore-webhook.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "example-webhook.fullname" -}}
{{- define "gcore-webhook.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@ -27,22 +27,38 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "example-webhook.chart" -}}
{{- define "gcore-webhook.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "example-webhook.selfSignedIssuer" -}}
{{ printf "%s-selfsign" (include "example-webhook.fullname" .) }}
{{/*
Common labels
*/}}
{{- define "gcore-webhook.labels" -}}
app.kubernetes.io/name: {{ include "gcore-webhook.name" . }}
helm.sh/chart: {{ include "gcore-webhook.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "example-webhook.rootCAIssuer" -}}
{{ printf "%s-ca" (include "example-webhook.fullname" .) }}
{{/*
PKI
*/}}
{{- define "gcore-webhook.selfSignedIssuer" -}}
{{ printf "%s-selfsign" (include "gcore-webhook.fullname" .) }}
{{- end -}}
{{- define "example-webhook.rootCACertificate" -}}
{{ printf "%s-ca" (include "example-webhook.fullname" .) }}
{{- define "gcore-webhook.rootCAIssuer" -}}
{{ printf "%s-ca" (include "gcore-webhook.fullname" .) }}
{{- end -}}
{{- define "example-webhook.servingCertificate" -}}
{{ printf "%s-webhook-tls" (include "example-webhook.fullname" .) }}
{{- define "gcore-webhook.rootCACertificate" -}}
{{ printf "%s-ca" (include "gcore-webhook.fullname" .) }}
{{- end -}}
{{- define "gcore-webhook.servingCertificate" -}}
{{ printf "%s-webhook-tls" (include "gcore-webhook.fullname" .) }}
{{- end -}}


@ -3,17 +3,14 @@ kind: APIService
metadata:
name: v1alpha1.{{ .Values.groupName }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{ include "gcore-webhook.labels" . | indent 4 }}
annotations:
cert-manager.io/inject-ca-from: "{{ .Release.Namespace }}/{{ include "example-webhook.servingCertificate" . }}"
cert-manager.io/inject-ca-from: "{{ .Release.Namespace }}/{{ include "gcore-webhook.servingCertificate" . }}"
spec:
group: {{ .Values.groupName }}
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: {{ include "example-webhook.fullname" . }}
name: {{ include "gcore-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
version: v1alpha1


@ -1,25 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "example-webhook.fullname" . }}
name: {{ include "gcore-webhook.fullname" . }}
labels:
app: {{ include "example-webhook.name" . }}
chart: {{ include "example-webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ include "example-webhook.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ include "gcore-webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "example-webhook.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ include "gcore-webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: {{ include "example-webhook.fullname" . }}
serviceAccountName: {{ include "gcore-webhook.fullname" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
@ -27,12 +28,13 @@ spec:
args:
- --tls-cert-file=/tls/tls.crt
- --tls-private-key-file=/tls/tls.key
- --secure-port={{ default 443 .Values.pod.securePort }}
env:
- name: GROUP_NAME
value: {{ .Values.groupName | quote }}
ports:
- name: https
containerPort: 443
containerPort: {{ default 443 .Values.pod.securePort }}
protocol: TCP
livenessProbe:
httpGet:
@ -49,20 +51,20 @@ spec:
mountPath: /tls
readOnly: true
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- toYaml .Values.resources | nindent 12 }}
volumes:
- name: certs
secret:
secretName: {{ include "example-webhook.servingCertificate" . }}
secretName: {{ include "gcore-webhook.servingCertificate" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,64 @@
---
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "gcore-webhook.selfSignedIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
selfSigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "gcore-webhook.rootCACertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
secretName: {{ include "gcore-webhook.rootCACertificate" . }}
duration: 43800h # 5y
issuerRef:
name: {{ include "gcore-webhook.selfSignedIssuer" . }}
commonName: "ca.gcore-webhook.cert-manager"
isCA: true
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "gcore-webhook.rootCAIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
ca:
secretName: {{ include "gcore-webhook.rootCACertificate" . }}
---
# Finally, generate a serving certificate for the webhook to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "gcore-webhook.servingCertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
secretName: {{ include "gcore-webhook.servingCertificate" . }}
duration: 8760h # 1y
issuerRef:
name: {{ include "gcore-webhook.rootCAIssuer" . }}
dnsNames:
- {{ include "gcore-webhook.fullname" . }}
- {{ include "gcore-webhook.fullname" . }}.{{ .Release.Namespace }}
- {{ include "gcore-webhook.fullname" . }}.{{ .Release.Namespace }}.svc


@ -0,0 +1,140 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gcore-webhook.fullname" . }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
---
# Grant the webhook permission to read the ConfigMap containing the Kubernetes
# apiserver's requestheader-ca-certificate.
# This ConfigMap is automatically created by the Kubernetes apiserver.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gcore-webhook.fullname" . }}:webhook-authentication-reader
namespace: kube-system
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "gcore-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
# apiserver gets the auth-delegator role to delegate auth decisions to
# the core apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gcore-webhook.fullname" . }}:auth-delegator
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "gcore-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
# Grant cert-manager permission to validate using our apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "gcore-webhook.fullname" . }}:domain-solver
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
rules:
- apiGroups:
- {{ .Values.groupName }}
resources:
- '*'
verbs:
- 'create'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gcore-webhook.fullname" . }}:domain-solver
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "gcore-webhook.fullname" . }}:domain-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ .Values.certManager.serviceAccountName }}
namespace: {{ .Values.certManager.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gcore-webhook.fullname" . }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "gcore-webhook.fullname" . }}
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "gcore-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "gcore-webhook.fullname" . }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
rules:
- apiGroups:
- ''
resources:
- 'secrets'
verbs:
- 'get'
---
# Grant cert-manager-webhook-gandi permission to read the flow control mechanism (APF)
# API Priority and Fairness is enabled by default in Kubernetes 1.20
# https://kubernetes.io/docs/concepts/cluster-administration/flow-control/
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "gcore-webhook.fullname" . }}:flowcontrol-solver
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
rules:
- apiGroups:
- "flowcontrol.apiserver.k8s.io"
resources:
- "prioritylevelconfigurations"
- "flowschemas"
verbs:
- "list"
- "watch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "gcore-webhook.fullname" . }}:flowcontrol-solver
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "gcore-webhook.fullname" . }}:flowcontrol-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "gcore-webhook.fullname" . }}
namespace: {{ .Release.Namespace }}


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "gcore-webhook.fullname" . }}
labels:
{{ include "gcore-webhook.labels" . | indent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: https
protocol: TCP
name: https
selector:
app.kubernetes.io/name: {{ include "gcore-webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

35
deploy/helm/values.yaml

@ -0,0 +1,35 @@
# Default values for gcore-webhook.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: ghcr.io/g-core/cert-manager-webhook-gcore
  tag: latest
  pullPolicy: IfNotPresent

pod:
  securePort:

groupName: acme.mycompany.com

certManager:
  namespace: cert-manager
  serviceAccountName: cert-manager

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

service:
  type: ClusterIP
  port: 443

resources: {}
nodeSelector: {}
tolerations: []
affinity: {}

325
deploy/webhook-all.yml

@ -0,0 +1,325 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook
namespace: cert-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gcore-webhook
subjects:
- kind: ServiceAccount
name: gcore-webhook
namespace: cert-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook:webhook-authentication-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: gcore-webhook
namespace: cert-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook:domain-solver
rules:
- apiGroups:
- acme.mycompany.com
resources:
- '*'
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: gcore-webhook
namespace: cert-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook:domain-solver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gcore-webhook:domain-solver
subjects:
- kind: ServiceAccount
name: cert-manager
namespace: cert-manager
---
# Grant cert-manager-webhook-gcore permission to read the flow control mechanism (APF)
# API Priority and Fairness is enabled by default in Kubernetes 1.20
# https://kubernetes.io/docs/concepts/cluster-administration/flow-control/
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gcore-webhook:flowcontrol-solver
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
rules:
- apiGroups:
- "flowcontrol.apiserver.k8s.io"
resources:
- "prioritylevelconfigurations"
- "flowschemas"
verbs:
- "list"
- "watch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gcore-webhook:flowcontrol-solver
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gcore-webhook:flowcontrol-solver
subjects:
- apiGroup: ""
kind: ServiceAccount
name: gcore-webhook
namespace: cert-manager
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook
namespace: cert-manager
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/name: gcore-webhook
sessionAffinity: None
type: ClusterIP
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook-ca
namespace: cert-manager
spec:
ca:
secretName: gcore-webhook-ca
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook-selfsign
namespace: cert-manager
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook-ca
namespace: cert-manager
spec:
commonName: ca.gcore-webhook.cert-manager
duration: 43800h0m0s
isCA: true
issuerRef:
name: gcore-webhook-selfsign
secretName: gcore-webhook-ca
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook-webhook-tls
namespace: cert-manager
spec:
dnsNames:
- gcore-webhook
- gcore-webhook.cert-manager
- gcore-webhook.cert-manager.svc
duration: 8760h0m0s
issuerRef:
name: gcore-webhook-ca
secretName: gcore-webhook-webhook-tls
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: gcore-webhook
namespace: cert-manager
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: gcore-webhook
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: gcore-webhook
app.kubernetes.io/name: gcore-webhook
spec:
containers:
- args:
- --tls-cert-file=/tls/tls.crt
- --tls-private-key-file=/tls/tls.key
env:
- name: GROUP_NAME
value: acme.mycompany.com
image: ghcr.io/g-core/cert-manager-webhook-gcore:latest
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: gcore-webhook
ports:
- containerPort: 443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tls
name: certs
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: gcore-webhook
serviceAccountName: gcore-webhook
terminationGracePeriodSeconds: 30
volumes:
- name: certs
secret:
defaultMode: 420
secretName: gcore-webhook-webhook-tls
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
annotations:
cert-manager.io/inject-ca-from: cert-manager/gcore-webhook-webhook-tls
labels:
app.kubernetes.io/name: gcore-webhook
app.kubernetes.io/version: 0.1.1
name: v1alpha1.acme.mycompany.com
spec:
group: acme.mycompany.com
groupPriorityMinimum: 1000
service:
name: gcore-webhook
namespace: cert-manager
version: v1alpha1
versionPriority: 15


@ -1,69 +0,0 @@
package example
import (
"fmt"
"github.com/miekg/dns"
)
func (e *exampleSolver) handleDNSRequest(w dns.ResponseWriter, req *dns.Msg) {
msg := new(dns.Msg)
msg.SetReply(req)
switch req.Opcode {
case dns.OpcodeQuery:
for _, q := range msg.Question {
if err := e.addDNSAnswer(q, msg, req); err != nil {
msg.SetRcode(req, dns.RcodeServerFailure)
break
}
}
}
w.WriteMsg(msg)
}
func (e *exampleSolver) addDNSAnswer(q dns.Question, msg *dns.Msg, req *dns.Msg) error {
switch q.Qtype {
// Always return loopback for any A query
case dns.TypeA:
rr, err := dns.NewRR(fmt.Sprintf("%s 5 IN A 127.0.0.1", q.Name))
if err != nil {
return err
}
msg.Answer = append(msg.Answer, rr)
return nil
// TXT records are the only important record for ACME dns-01 challenges
case dns.TypeTXT:
e.RLock()
record, found := e.txtRecords[q.Name]
e.RUnlock()
if !found {
msg.SetRcode(req, dns.RcodeNameError)
return nil
}
rr, err := dns.NewRR(fmt.Sprintf("%s 5 IN TXT %s", q.Name, record))
if err != nil {
return err
}
msg.Answer = append(msg.Answer, rr)
return nil
// NS and SOA are for authoritative lookups, return obviously invalid data
case dns.TypeNS:
rr, err := dns.NewRR(fmt.Sprintf("%s 5 IN NS ns.example-acme-webook.invalid.", q.Name))
if err != nil {
return err
}
msg.Answer = append(msg.Answer, rr)
return nil
case dns.TypeSOA:
rr, err := dns.NewRR(fmt.Sprintf("%s 5 IN SOA %s 20 5 5 5 5", "ns.example-acme-webook.invalid.", "ns.example-acme-webook.invalid."))
if err != nil {
return err
}
msg.Answer = append(msg.Answer, rr)
return nil
default:
return fmt.Errorf("unimplemented record type %v", q.Qtype)
}
}


@ -1,68 +0,0 @@
// package example contains a self-contained example of a webhook that passes the cert-manager
// DNS conformance tests
package example
import (
"fmt"
"os"
"sync"
"github.com/jetstack/cert-manager/pkg/acme/webhook"
acme "github.com/jetstack/cert-manager/pkg/acme/webhook/apis/acme/v1alpha1"
"github.com/miekg/dns"
"k8s.io/client-go/rest"
)
type exampleSolver struct {
name string
server *dns.Server
txtRecords map[string]string
sync.RWMutex
}
func (e *exampleSolver) Name() string {
return e.name
}
func (e *exampleSolver) Present(ch *acme.ChallengeRequest) error {
e.Lock()
e.txtRecords[ch.ResolvedFQDN] = ch.Key
e.Unlock()
return nil
}
func (e *exampleSolver) CleanUp(ch *acme.ChallengeRequest) error {
e.Lock()
delete(e.txtRecords, ch.ResolvedFQDN)
e.Unlock()
return nil
}
func (e *exampleSolver) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
go func(done <-chan struct{}) {
<-done
if err := e.server.Shutdown(); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err.Error())
}
}(stopCh)
go func() {
if err := e.server.ListenAndServe(); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err.Error())
os.Exit(1)
}
}()
return nil
}
func New(port string) webhook.Solver {
e := &exampleSolver{
name: "example",
txtRecords: make(map[string]string),
}
e.server = &dns.Server{
Addr: ":" + port,
Net: "udp",
Handler: dns.HandlerFunc(e.handleDNSRequest),
}
return e
}


@ -1,96 +0,0 @@
package example
import (
"crypto/rand"
"math/big"
"testing"
acme "github.com/jetstack/cert-manager/pkg/acme/webhook/apis/acme/v1alpha1"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
)
func TestExampleSolver_Name(t *testing.T) {
port, _ := rand.Int(rand.Reader, big.NewInt(50000))
port = port.Add(port, big.NewInt(15534))
solver := New(port.String())
assert.Equal(t, "example", solver.Name())
}
func TestExampleSolver_Initialize(t *testing.T) {
port, _ := rand.Int(rand.Reader, big.NewInt(50000))
port = port.Add(port, big.NewInt(15534))
solver := New(port.String())
done := make(chan struct{})
err := solver.Initialize(nil, done)
assert.NoError(t, err, "Expected Initialize not to error")
close(done)
}
func TestExampleSolver_Present_Cleanup(t *testing.T) {
port, _ := rand.Int(rand.Reader, big.NewInt(50000))
port = port.Add(port, big.NewInt(15534))
solver := New(port.String())
done := make(chan struct{})
err := solver.Initialize(nil, done)
assert.NoError(t, err, "Expected Initialize not to error")
validTestData := []struct {
hostname string
record string
}{
{"test1.example.com.", "testkey1"},
{"test2.example.com.", "testkey2"},
{"test3.example.com.", "testkey3"},
}
for _, test := range validTestData {
err := solver.Present(&acme.ChallengeRequest{
Action: acme.ChallengeActionPresent,
Type: "dns-01",
ResolvedFQDN: test.hostname,
Key: test.record,
})
assert.NoError(t, err, "Unexpected error while presenting %v", t)
}
// Resolve test data
for _, test := range validTestData {
msg := new(dns.Msg)
msg.Id = dns.Id()
msg.RecursionDesired = true
msg.Question = make([]dns.Question, 1)
msg.Question[0] = dns.Question{dns.Fqdn(test.hostname), dns.TypeTXT, dns.ClassINET}
in, err := dns.Exchange(msg, "127.0.0.1:"+port.String())
assert.NoError(t, err, "Presented record %s not resolvable", test.hostname)
assert.Len(t, in.Answer, 1, "RR response is of incorrect length")
assert.Equal(t, []string{test.record}, in.Answer[0].(*dns.TXT).Txt, "TXT record returned did not match presented record")
}
// Cleanup test data
for _, test := range validTestData {
err := solver.CleanUp(&acme.ChallengeRequest{
Action: acme.ChallengeActionCleanUp,
Type: "dns-01",
ResolvedFQDN: test.hostname,
Key: test.record,
})
assert.NoError(t, err, "Unexpected error while cleaning up %v", t)
}
// Resolve test data
for _, test := range validTestData {
msg := new(dns.Msg)
msg.Id = dns.Id()
msg.RecursionDesired = true
msg.Question = make([]dns.Question, 1)
msg.Question[0] = dns.Question{dns.Fqdn(test.hostname), dns.TypeTXT, dns.ClassINET}
in, err := dns.Exchange(msg, "127.0.0.1:"+port.String())
assert.NoError(t, err, "Presented record %s not resolvable", test.hostname)
assert.Len(t, in.Answer, 0, "RR response is of incorrect length")
assert.Equal(t, dns.RcodeNameError, in.Rcode, "Expexted NXDOMAIN")
}
close(done)
}

13
go.mod

@ -1,15 +1,20 @@
module github.com/cert-manager/webhook-example
module github.com/G-Core/cert-manager-webhook-gcore
go 1.17
require (
github.com/jetstack/cert-manager v1.7.0
github.com/miekg/dns v1.1.34
github.com/stretchr/testify v1.7.0
github.com/miekg/dns v1.1.34 // indirect
github.com/stretchr/testify v1.7.1
k8s.io/apiextensions-apiserver v0.23.1
k8s.io/client-go v0.23.1
)
require (
github.com/G-Core/gcore-dns-sdk-go v0.2.0
k8s.io/apimachinery v0.23.1
)
require (
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
@ -83,12 +88,12 @@ require (
google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.23.1 // indirect
k8s.io/apimachinery v0.23.1 // indirect
k8s.io/apiserver v0.23.1 // indirect
k8s.io/component-base v0.23.1 // indirect
k8s.io/klog/v2 v2.30.0 // indirect

933
go.sum

File diff suppressed because it is too large

246
main.go

@ -1,50 +1,59 @@
package main
import (
"context"
"encoding/json"
"fmt"
"net/url"
"os"
"strings"
"time"
extapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
//"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
dnssdk "github.com/G-Core/gcore-dns-sdk-go"
"github.com/jetstack/cert-manager/pkg/acme/webhook/apis/acme/v1alpha1"
"github.com/jetstack/cert-manager/pkg/acme/webhook/cmd"
certmgrv1 "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
extapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var GroupName = os.Getenv("GROUP_NAME")
const (
providerName = "gcore"
groupNameEnvVar = "GROUP_NAME"
txtType = "TXT"
)
func main() {
if GroupName == "" {
panic("GROUP_NAME must be specified")
groupName := os.Getenv(groupNameEnvVar)
if groupName == "" {
panic(fmt.Sprintf("%s must be specified", groupNameEnvVar))
}
// This will register our custom DNS provider with the webhook serving
// library, making it available as an API under the provided GroupName.
// library, making it available as an API under the provided groupName.
// You can register multiple DNS provider implementations with a single
// webhook, where the Name() method will be used to disambiguate between
// the different implementations.
cmd.RunWebhookServer(GroupName,
&customDNSProviderSolver{},
cmd.RunWebhookServer(groupName,
&gcoreDNSProviderSolver{},
)
}
// customDNSProviderSolver implements the provider-specific logic needed to
// gcoreDNSProviderSolver implements the provider-specific logic needed to
// 'present' an ACME challenge TXT record for your own DNS provider.
// To do so, it must implement the `github.com/jetstack/cert-manager/pkg/acme/webhook.Solver`
// interface.
type customDNSProviderSolver struct {
// If a Kubernetes 'clientset' is needed, you must:
// 1. uncomment the additional `client` field in this structure below
// 2. uncomment the "k8s.io/client-go/kubernetes" import at the top of the file
// 3. uncomment the relevant code in the Initialize method below
// 4. ensure your webhook's service account has the required RBAC role
// assigned to it for interacting with the Kubernetes APIs you need.
//client kubernetes.Clientset
type gcoreDNSProviderSolver struct {
client *kubernetes.Clientset
ttl int
propagationTimeout int
}
// customDNSProviderConfig is a structure that is used to decode into when
// gcoreDNSProviderConfig is a structure that is used to decode into when
// solving a DNS01 challenge.
// This information is provided by cert-manager, and may be a reference to
// additional configuration that's needed to solve the challenge for this
@ -58,14 +67,25 @@ type customDNSProviderSolver struct {
// You should not include sensitive information here. If credentials need to
// be used by your provider here, you should reference a Kubernetes Secret
// resource and fetch these credentials using a Kubernetes clientset.
type customDNSProviderConfig struct {
// Change the two fields below according to the format of the configuration
// to be decoded.
type gcoreDNSProviderConfig struct {
// These fields will be set by users in the
// `issuer.spec.acme.dns01.providers.webhook.config` field.
//Email string `json:"email"`
//APIKeySecretRef v1alpha1.SecretKeySelector `json:"apiKeySecretRef"`
APIKeySecretRef certmgrv1.SecretKeySelector `json:"apiKeySecretRef"`
// +optional. Base url for API requests
ApiUrl string `json:"apiUrl"`
// +optional. Permanent token if you don't want to use a k8s secret
ApiToken string `json:"apiToken"`
// +optional
TTL int `json:"ttl"`
// +optional
Timeout int `json:"timeout"`
// +optional
PropagationTimeout int `json:"propagationTimeout"`
// +optional
PollingInterval int `json:"pollingInterval"`
}
// Name is used as the name for this DNS solver when referencing it on the ACME
@ -74,8 +94,8 @@ type customDNSProviderConfig struct {
// solvers configured with the same Name() **so long as they do not co-exist
// within a single webhook deployment**.
// For example, `cloudflare` may be used as the name of a solver.
func (c *customDNSProviderSolver) Name() string {
return "my-custom-solver"
func (c *gcoreDNSProviderSolver) Name() string {
return providerName
}
// Present is responsible for actually presenting the DNS record with the
@ -83,16 +103,20 @@ func (c *customDNSProviderSolver) Name() string {
// This method should tolerate being called multiple times with the same value.
// cert-manager itself will later perform a self check to ensure that the
// solver has correctly configured the DNS provider.
func (c *customDNSProviderSolver) Present(ch *v1alpha1.ChallengeRequest) error {
cfg, err := loadConfig(ch.Config)
func (c *gcoreDNSProviderSolver) Present(ch *v1alpha1.ChallengeRequest) error {
sdk, err := c.initSDK(ch)
if err != nil {
return err
return fmt.Errorf("init sdk: %w", err)
}
// TODO: do something more useful with the decoded configuration
fmt.Printf("Decoded configuration %v", cfg)
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.propagationTimeout)*time.Second)
defer cancel()
err = c.upsertTxtRecord(ctx, sdk, ch)
if err != nil {
return fmt.Errorf("detect zone: %w", err)
}
// TODO: add code that sets a record in the DNS provider's console
return nil
}
@ -102,8 +126,25 @@ func (c *customDNSProviderSolver) Present(ch *v1alpha1.ChallengeRequest) error {
// value provided on the ChallengeRequest should be cleaned up.
// This is in order to facilitate multiple DNS validations for the same domain
// concurrently.
func (c *customDNSProviderSolver) CleanUp(ch *v1alpha1.ChallengeRequest) error {
// TODO: add code that deletes a record from the DNS provider's console
func (c *gcoreDNSProviderSolver) CleanUp(ch *v1alpha1.ChallengeRequest) error {
sdk, err := c.initSDK(ch)
if err != nil {
return fmt.Errorf("init sdk: %w", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.propagationTimeout)*time.Second)
defer cancel()
fqdn := strings.Trim(ch.ResolvedFQDN, ".")
zone, err := c.detectZone(ctx, fqdn, sdk)
if err != nil {
return fmt.Errorf("detect zone: %w", err)
}
err = sdk.DeleteRRSet(ctx, zone, fqdn, txtType)
if err != nil {
return fmt.Errorf("delete rrset: %w", err)
}
return nil
}
@ -116,25 +157,118 @@ func (c *customDNSProviderSolver) CleanUp(ch *v1alpha1.ChallengeRequest) error {
// provider accounts.
// The stopCh can be used to handle early termination of the webhook, in cases
// where a SIGTERM or similar signal is sent to the webhook process.
func (c *customDNSProviderSolver) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
///// UNCOMMENT THE BELOW CODE TO MAKE A KUBERNETES CLIENTSET AVAILABLE TO
///// YOUR CUSTOM DNS PROVIDER
//cl, err := kubernetes.NewForConfig(kubeClientConfig)
//if err != nil {
// return err
//}
//
//c.client = cl
///// END OF CODE TO MAKE KUBERNETES CLIENTSET AVAILABLE
func (c *gcoreDNSProviderSolver) Initialize(kubeClientConfig *rest.Config, _ <-chan struct{}) error {
cl, err := kubernetes.NewForConfig(kubeClientConfig)
if err != nil {
return fmt.Errorf("client: %w", err)
}
c.client = cl
return nil
}
func (c *gcoreDNSProviderSolver) upsertTxtRecord(ctx context.Context, sdk *dnssdk.Client, ch *v1alpha1.ChallengeRequest) error {
fqdn := strings.Trim(ch.ResolvedFQDN, ".")
zone, err := c.detectZone(ctx, fqdn, sdk)
if err != nil {
return fmt.Errorf("detect zone: %w", err)
}
recordsToAdd := []dnssdk.ResourceRecord{{Content: []interface{}{ch.Key}, Enabled: true}}
rrset, err := sdk.RRSet(ctx, zone, fqdn, txtType)
if err == nil {
rrset.Records = append(rrset.Records, recordsToAdd...)
err = sdk.UpdateRRSet(ctx, zone, fqdn, txtType, rrset)
if err != nil {
return fmt.Errorf("update rrset: %w", err)
}
return nil
}
err = sdk.AddZoneRRSet(ctx,
zone,
fqdn,
txtType,
recordsToAdd,
c.ttl)
if err != nil {
return fmt.Errorf("add rrset: %w", err)
}
return nil
}
func (c *gcoreDNSProviderSolver) initSDK(ch *v1alpha1.ChallengeRequest) (*dnssdk.Client, error) {
cfg, err := loadConfig(ch.Config)
if err != nil {
return nil, fmt.Errorf("load cfg: %w", err)
}
apiFullUrl := cfg.ApiUrl
if apiFullUrl == "" {
apiFullUrl = "https://api.gcorelabs.com/dns"
}
apiURL, err := url.Parse(apiFullUrl)
if err != nil || apiFullUrl == "" {
return nil, fmt.Errorf("parse api url %s: %w", apiFullUrl, err)
}
token := cfg.ApiToken
if token == "" {
token, err = c.extractApiTokenFromSecret(cfg, ch)
if err != nil {
return nil, fmt.Errorf("get token: %w", err)
}
}
sdk := dnssdk.NewClient(dnssdk.PermanentAPIKeyAuth(token), func(client *dnssdk.Client) {
client.BaseURL = apiURL
})
if cfg.Timeout > 0 {
sdk.HTTPClient.Timeout = time.Duration(cfg.Timeout) * time.Second
}
if cfg.TTL == 0 {
cfg.TTL = 300
}
c.ttl = cfg.TTL
if cfg.PropagationTimeout == 0 {
cfg.PropagationTimeout = 60 * 5
}
c.propagationTimeout = cfg.PropagationTimeout
return sdk, nil
}
func (c *gcoreDNSProviderSolver) extractApiTokenFromSecret(
cfg gcoreDNSProviderConfig, ch *v1alpha1.ChallengeRequest) (string, error) {
sec, err := c.client.CoreV1().
Secrets(ch.ResourceNamespace).
Get(context.Background(), cfg.APIKeySecretRef.LocalObjectReference.Name, metaV1.GetOptions{})
if err != nil {
return "", fmt.Errorf("extract secret: %w", err)
}
secBytes, ok := sec.Data[cfg.APIKeySecretRef.Key]
if !ok {
return "", fmt.Errorf("key %s not found in secret \"%s/%s\"",
cfg.APIKeySecretRef.Key,
cfg.APIKeySecretRef.LocalObjectReference.Name,
ch.ResourceNamespace)
}
return string(secBytes), nil
}
func (c *gcoreDNSProviderSolver) detectZone(ctx context.Context, fqdn string, sdk *dnssdk.Client) (string, error) {
lastErr := fmt.Errorf("empty list")
zones := extractAllZones(fqdn)
n := len(zones) - 1
for i := range zones {
dnsZone, err := sdk.Zone(ctx, zones[n-i])
if err == nil {
return dnsZone.Name, nil
}
lastErr = err
}
return "", fmt.Errorf("zone %q not found: %w", fqdn, lastErr)
}
// loadConfig is a small helper function that decodes JSON configuration into
// the typed config struct.
func loadConfig(cfgJSON *extapi.JSON) (customDNSProviderConfig, error) {
cfg := customDNSProviderConfig{}
func loadConfig(cfgJSON *extapi.JSON) (gcoreDNSProviderConfig, error) {
cfg := gcoreDNSProviderConfig{}
// handle the 'base case' where no configuration has been provided
if cfgJSON == nil {
return cfg, nil
@ -145,3 +279,17 @@ func loadConfig(cfgJSON *extapi.JSON) (customDNSProviderConfig, error) {
return cfg, nil
}
func extractAllZones(fqdn string) []string {
parts := strings.Split(strings.Trim(fqdn, "."), ".")
if len(parts) < 3 {
return nil
}
var zones []string
for i := 1; i < len(parts)-1; i++ {
zones = append(zones, strings.Join(parts[i:], "."))
}
return zones
}


@ -3,10 +3,10 @@ package main
import (
"os"
"testing"
"time"
"github.com/jetstack/cert-manager/test/acme/dns"
"github.com/cert-manager/webhook-example/example"
"github.com/stretchr/testify/assert"
)
var (
@ -17,25 +17,51 @@ func TestRunsSuite(t *testing.T) {
// The manifest path should contain a file named config.json that is a
// snippet of valid configuration that should be included on the
// ChallengeRequest passed as part of the test cases.
//
// Uncomment the below fixture when implementing your custom DNS provider
//fixture := dns.NewFixture(&customDNSProviderSolver{},
// dns.SetResolvedZone(zone),
// dns.SetAllowAmbientCredentials(false),
// dns.SetManifestPath("testdata/my-custom-solver"),
// dns.SetBinariesPath("_test/kubebuilder/bin"),
//)
solver := example.New("59351")
fixture := dns.NewFixture(solver,
dns.SetResolvedZone("example.com."),
dns.SetManifestPath("testdata/my-custom-solver"),
dns.SetDNSServer("127.0.0.1:59351"),
dns.SetUseAuthoritative(false),
pollTime, _ := time.ParseDuration("10s")
timeOut, _ := time.ParseDuration("5m")
fixture := dns.NewFixture(&gcoreDNSProviderSolver{},
dns.SetResolvedZone(zone),
dns.SetAllowAmbientCredentials(false),
dns.SetManifestPath("testdata/gcore"),
// Disable the extended test to create several records for the same Record DNS Name
dns.SetStrict(false),
// Increase the poll interval to 10s
dns.SetPollInterval(pollTime),
// Increase the limit from 2 min to 5 min
dns.SetPropagationLimit(timeOut),
)
//need to uncomment RunConformance and delete RunBasic and RunExtended once https://github.com/cert-manager/cert-manager/pull/4835 is merged
//fixture.RunConformance(t)
fixture.RunBasic(t)
fixture.RunExtended(t)
fixture.RunConformance(t)
}
func Test_extractAllZones(t *testing.T) {
testCases := []struct {
desc string
fqdn string
expected []string
}{
{
desc: "success",
fqdn: "_acme-challenge.my.test.domain.com.",
expected: []string{"my.test.domain.com", "test.domain.com", "domain.com"},
},
{
desc: "empty",
fqdn: "_acme-challenge.com.",
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
got := extractAllZones(test.fqdn)
assert.Equal(t, test.expected, got)
})
}
}


@ -0,0 +1,61 @@
#!/usr/bin/env bash
set -e
#hack_dir=$(dirname ${BASH_SOURCE})
#source ${hack_dir}/common.sh
k8s_version=1.14.1
goarch=amd64
goos="unknown"
if [[ "$OSTYPE" == "linux-gnu" ]]; then
goos="linux"
elif [[ "$OSTYPE" == "darwin"* ]]; then
goos="darwin"
fi
if [[ "$goos" == "unknown" ]]; then
echo "OS '$OSTYPE' not supported. Aborting." >&2
exit 1
fi
tmp_root=./_out
kb_root_dir=$tmp_root/kubebuilder
# Turn colors in this script off by setting the NO_COLOR variable in your
# environment to any value:
#
# $ NO_COLOR=1 test.sh
NO_COLOR=${NO_COLOR:-""}
if [ -z "$NO_COLOR" ]; then
header=$'\e[1;33m'
reset=$'\e[0m'
else
header=''
reset=''
fi
function header_text {
echo "$header$*$reset"
}
# fetch k8s API gen tools and make it available under kb_root_dir/bin.
function fetch_kb_tools {
header_text "fetching tools"
mkdir -p $tmp_root
kb_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz"
kb_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$kb_tools_archive_name"
kb_tools_archive_path="$tmp_root/$kb_tools_archive_name"
if [ ! -f $kb_tools_archive_path ]; then
curl -sL ${kb_tools_download_url} -o "$kb_tools_archive_path"
fi
tar -zvxf "$kb_tools_archive_path" -C "$tmp_root/"
}
header_text "using tools"
fetch_kb_tools
header_text "kubebuilder tools (etcd, kubectl, kube-apiserver)used to perform local tests installed under $tmp_root/kubebuilder/bin/"
exit 0

7
testdata/gcore/apikey.yaml

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
  name: gcore-api-token
type: Opaque
stringData:
  token: 388$8411fec642b1a6b33882fd828ebccc40...

7
testdata/gcore/config.json

@ -0,0 +1,7 @@
{
  "apiKeySecretRef": {
    "name": "gcore-api-token",
    "key": "token"
  },
  "ttl": 60
}


@ -1,3 +0,0 @@
# Solver testdata directory
TODO


@ -1 +0,0 @@
{}