# Examples
A collection of copy-and-paste-able configurations for various types of clouds, use-cases, and deployments. These files can also be found in the git repository in the `docs/configuration/examples/` directory.
TIP: Remember to set your identity provider settings and to generate new secret keys!
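For example, both secrets can be generated the same way the example files below do:

# Each secret is 256 bits of randomness, base64 encoded; generate a fresh value per key
export SHARED_SECRET="$(head -c32 /dev/urandom | base64)"
export COOKIE_SECRET="$(head -c32 /dev/urandom | base64)"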
# Settings
# Configuration File
# Main configuration flags : https://www.pomerium.io/docs/reference/reference/
#
# address: ":8443" # optional, default is 443
# pomerium_debug: true # optional, default is false
# service: "all" # optional, default is all
# log_level: info # optional, default is debug
authenticate_service_url: https://authenticate.corp.beyondperimeter.com
# authorize service url will default to localhost in all-in-one mode, otherwise
# it should be set to a "behind-the-ingress" routable url
# authorize_service_url: https://pomerium-authorize-service.default.svc.cluster.local
# cache_service_url: https://pomerium-cache-service.default.svc.cluster.local
# Certificates can be loaded as files or base64 encoded bytes.
# certificate_file: "./cert.pem" # optional, defaults to `./cert.pem`
# certificate_key_file: "./privkey.pem" # optional, defaults to `./certprivkey.pem`
# certificate_authority_file: "./cert.pem"
# alternatively, insecure mode can be used if behind a TLS terminating ingress,
# or when using a sidecar proxy
# insecure_server: true
# base64 encoded cert, eg. `base64 -i cert.pem` / `base64 -i privkey.pem`
# certificate: |
# "xxxxxx"
# certificate_key: |
# "xxxx"
# Generate 256 bit random keys e.g. `head -c32 /dev/urandom | base64`
# shared_secret: hsJIQsx9KKx4qVlggg/T3AuLTmVu0uHhwTQgMPlVs7U=
# cookie_secret: WwMtDXWaRDMBQCylle8OJ+w4kLIDIGd8W3cB4/zFFtg=
# If set, a JWT based signature is appended to each request header `x-pomerium-jwt-assertion`
# signing_key: "Replace with base64'd private key from ./scripts/self-signed-sign-key.sh"
# Identity Provider Settings
# Azure
# idp_provider: "azure"
# idp_provider_url: "https://login.microsoftonline.com/REPLACEME/v2.0"
# idp_client_id: "REPLACEME"
# idp_client_secret: "REPLACEME"
## GOOGLE
# idp_provider: "google"
# idp_provider_url: "https://accounts.google.com" # optional for google
# idp_client_id: "REPLACEME"
# idp_client_secret: "REPLACEME"
# If using GSuite and you want user groups, you will need to set a service account;
# see the identity provider docs for Google for more info:
# idp_service_account: $(echo '{"impersonate_user": "bdd@pomerium.io"}' | base64)
# OKTA
# idp_provider: "okta"
# idp_client_id: "REPLACEME"
# idp_client_secret: "replaceme"
# idp_provider_url: "https://REPLACEME.oktapreview.com/oauth2/default"
# OneLogin
# idp_provider: "onelogin"
# idp_client_id: "REPLACEME"
# idp_client_secret: "REPLACEME"
# idp_provider_url: "https://openid-connect.onelogin.com/oidc" #optional, defaults to `https://openid-connect.onelogin.com/oidc`
# scope: "openid email" # generally, you want the default OIDC scopes
# Proxied routes and per-route policies are defined in a policy block
policy:
  - from: https://httpbin.corp.beyondperimeter.com
    to: http://httpbin
    allowed_domains:
      - pomerium.io
    cors_allow_preflight: true
    timeout: 30s
  - from: https://external-httpbin.corp.beyondperimeter.com
    to: https://httpbin.org
    allowed_domains:
      - gmail.com
  - from: https://hello.corp.beyondperimeter.com
    to: http://hello:8080
    allowed_groups:
      - admins@pomerium.io
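Once the file is customized, a quick sanity check is to request one of the routes unauthenticated; Pomerium should answer with a redirect toward the authenticate service (the hostname below is from the example policy):

# Expect a redirect to https://authenticate.corp.beyondperimeter.com when not logged in
curl -k -I https://httpbin.corp.beyondperimeter.com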
# Environmental Variables
#!/bin/bash
# Main configuration flags : https://www.pomerium.io/docs/reference/reference/
# Main configuration flags
# export ADDRESS=":8443" # optional, default is 443
# export POMERIUM_DEBUG=true # optional, default is false
# export SERVICE="all" # optional, default is all
# export LOG_LEVEL="info" # optional, default is debug
export AUTHENTICATE_SERVICE_URL=https://authenticate.corp.beyondperimeter.com
# AUTHORIZE_SERVICE_URL will default to localhost in all-in-one mode,
# otherwise it should be set to a "behind-the-ingress" routable url
# export AUTHORIZE_SERVICE_URL=https://pomerium-authorize-service.default.svc.cluster.local
# export CACHE_SERVICE_URL=https://pomerium-cache-service.default.svc.cluster.local
# Certificates can be loaded as files or base64 encoded bytes.
# See : https://www.pomerium.io/docs/reference/certificates
export AUTOCERT=TRUE # Use Let's Encrypt to fetch certs. Port 80/443 must be internet accessible.
# export AUTOCERT_DIR="./certs" # The path where you want to place your certificates
# export CERTIFICATE_FILE="xxxx" # optional, defaults to `./cert.pem`
# export CERTIFICATE_KEY_FILE="xxx" # optional, defaults to `./certprivkey.pem`
# export CERTIFICATE="xxx" # base64 encoded cert, eg. `base64 -i cert.pem`
# export CERTIFICATE_KEY="xxx" # base64 encoded key, eg. `base64 -i privkey.pem`
# Generate 256 bit random keys e.g. `head -c32 /dev/urandom | base64`
export SHARED_SECRET="$(head -c32 /dev/urandom | base64)"
export COOKIE_SECRET="$(head -c32 /dev/urandom | base64)"
# If set, a JWT based signature is appended to each request header `x-pomerium-jwt-assertion`
# export SIGNING_KEY="Replace with base64'd private key from ./scripts/self-signed-sign-key.sh"
# Identity Provider Settings
# Azure
# export IDP_PROVIDER="azure"
# export IDP_PROVIDER_URL="https://login.microsoftonline.com/REPLACEME/v2.0"
# export IDP_CLIENT_ID="REPLACEME"
# export IDP_CLIENT_SECRET="REPLACEME"
## GOOGLE
export IDP_PROVIDER="google"
export IDP_PROVIDER_URL="https://accounts.google.com" # optional for google
# If using GSuite and you want user groups, you will need to set a service account;
# see the identity provider docs for Google for more info:
# export IDP_SERVICE_ACCOUNT=$(echo '{"impersonate_user": "bdd@pomerium.io"}' | base64)
# OKTA
# export IDP_PROVIDER="okta"
# export IDP_CLIENT_ID="REPLACEME"
# export IDP_CLIENT_SECRET="REPLACEME"
# export IDP_PROVIDER_URL="https://REPLACEME.oktapreview.com/oauth2/default"
# OneLogin
# export IDP_PROVIDER="onelogin"
# export IDP_CLIENT_ID="REPLACEME"
# export IDP_CLIENT_SECRET="REPLACEME"
# export IDP_PROVIDER_URL="https://openid-connect.onelogin.com/oidc" #optional, defaults to `https://openid-connect.onelogin.com/oidc`
# export SCOPE="openid email" # generally, you want the default OIDC scopes
# Proxied routes and per-route policies are defined in a policy provided either
# directly as a base64 encoded yaml/json file, or as the policy key in the configuration
# file
export POLICY="$(base64 ./docs/configuration/examples/config/policy.example.yaml)"
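If you don't have the repository checked out, here is a minimal sketch of producing the same value by hand (the route mirrors the example policy above):

# Write a one-route policy file and export it base64 encoded, as POLICY expects
cat > policy.example.yaml <<'EOF'
- from: https://httpbin.corp.beyondperimeter.com
  to: http://httpbin
  allowed_domains:
    - pomerium.io
EOF
export POLICY="$(base64 policy.example.yaml)"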
# Binary
- Suitable for bare-metal and virtual-machines
- No docker, docker-compose, or kubernetes required
- Minimal configuration
- Pomerium services are run in "all-in-one" mode
- No load balancer required
- Great for testing Pomerium
- Routes default to hosted version of httpbin.org
Customize for your identity provider and run `./bin/pomerium -config config.yaml`.
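A quick way to confirm the server came up, as a sketch assuming the default `:443` address and the example route hostname:

# Start the all-in-one server in the background; logs go to stdout
./bin/pomerium -config config.yaml &
# Confirm the TLS listener answers for one of the routed hostnames
openssl s_client -connect localhost:443 -servername httpbin.corp.beyondperimeter.com </dev/null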
# Docker
Uses the latest Pomerium build from Docker Hub. Docker and docker-compose are great tools for standing up and testing multiple services and containers without having to stand up a full cluster.
# All-in-One
- Minimal container-based configuration.
- Docker and Docker-Compose based.
- Runs a single container for all pomerium services
- Routes default to on-premise httpbin.
Customize for your identity provider, then run `docker-compose -f basic.docker-compose.yml up`.
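For example, from the examples directory:

# Bring the stack up in the background, then follow pomerium's logs
docker-compose -f basic.docker-compose.yml up -d
docker-compose -f basic.docker-compose.yml logs -f pomerium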
# basic.docker-compose.yml
version: "3"
services:
pomerium:
image: pomerium/pomerium:latest
environment:
# Generate new secret keys. e.g. `head -c32 /dev/urandom | base64`
- COOKIE_SECRET=V2JBZk0zWGtsL29UcFUvWjVDWWQ2UHExNXJ0b2VhcDI=
volumes:
# Mount your domain's certificates : https://www.pomerium.io/docs/reference/certificates
- ~/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer:/pomerium/cert.pem:ro
- ~/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key:/pomerium/privkey.pem:ro
# Mount your config file : https://www.pomerium.io/docs/reference/reference/
- ../config/config.minimal.yaml:/pomerium/config.yaml:ro
ports:
- 443:443
# https://httpbin.corp.beyondperimeter.com --> Pomerium --> http://httpbin
httpbin:
image: kennethreitz/httpbin:latest
expose:
- 80
# Distinct Services
- Docker and Docker-Compose based.
- Uses pre-configured built-in nginx load balancer
- Runs separate containers for each service
- Routes default to on-premise helloworld and httpbin.
Customize for your identity provider, then run `docker-compose -f nginx.docker-compose.yml up`.
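Since each service runs in its own container here, it is worth confirming they all came up:

docker-compose -f nginx.docker-compose.yml up -d
# The nginx and four pomerium containers plus the demo apps should all show as Up
docker-compose -f nginx.docker-compose.yml ps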
# nginx.docker-compose.yml
version: "3"
services:
nginx:
image: pomerium/nginx-proxy:latest
ports:
- "443:443"
volumes:
# NOTE!!! : nginx must be supplied with your wildcard certificates.
# see : https://github.com/jwilder/nginx-proxy#wildcard-certificates
- ~/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer:/etc/nginx/certs/corp.beyondperimeter.com.crt:ro
- ~/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key:/etc/nginx/certs/corp.beyondperimeter.com.key:ro
- /var/run/docker.sock:/tmp/docker.sock:ro
pomerium-authenticate:
image: pomerium/pomerium:latest # or `build: .` to build from source
restart: always
environment:
- SERVICES=authenticate
- INSECURE_SERVER=TRUE
# NOTE!: Replace with your identity provider settings https://www.pomerium.io/docs/identity-providers.html
# - IDP_PROVIDER=google
# - IDP_PROVIDER_URL=https://accounts.google.com
# - IDP_CLIENT_ID=REPLACE_ME
# - IDP_CLIENT_SECRET=REPLACE_ME
# - IDP_SERVICE_ACCOUNT=REPLACE_ME
# NOTE! Generate new secret keys! e.g. `head -c32 /dev/urandom | base64`
# Generated secret keys must match between services
- SHARED_SECRET=aDducXQzK2tPY3R4TmdqTGhaYS80eGYxcTUvWWJDb2M=
- COOKIE_SECRET=V2JBZk0zWGtsL29UcFUvWjVDWWQ2UHExNXJ0b2VhcDI=
# Tell nginx how to proxy pomerium's routes
- VIRTUAL_PROTO=http
- VIRTUAL_HOST=authenticate.corp.beyondperimeter.com
- VIRTUAL_PORT=443
- CACHE_SERVICE_URL=http://pomerium-cache:443
volumes:
- ../config/config.example.yaml:/pomerium/config.yaml:ro
expose:
- 443
pomerium-proxy:
image: pomerium/pomerium:latest # or `build: .` to build from source
restart: always
environment:
- SERVICES=proxy
- INSECURE_SERVER=TRUE
# IMPORTANT! If you are running pomerium behind another ingress (loadbalancer/firewall/etc)
# you must tell pomerium proxy how to communicate using an internal hostname for RPC
- AUTHORIZE_SERVICE_URL=http://pomerium-authorize:443
# When communicating internally, rPC is going to get a name conflict expecting an external
# facing certificate name (i.e. authenticate-service.local vs *.corp.example.com).
- SHARED_SECRET=aDducXQzK2tPY3R4TmdqTGhaYS80eGYxcTUvWWJDb2M=
- COOKIE_SECRET=V2JBZk0zWGtsL29UcFUvWjVDWWQ2UHExNXJ0b2VhcDI=
# Tell nginx how to proxy pomerium's routes
- VIRTUAL_PROTO=http
- VIRTUAL_HOST=*.corp.beyondperimeter.com
- VIRTUAL_PORT=443
volumes:
- ../config/config.example.yaml:/pomerium/config.yaml:ro
expose:
- 443
pomerium-authorize:
image: pomerium/pomerium:latest # or `build: .` to build from source
restart: always
environment:
- SERVICES=authorize
- SHARED_SECRET=aDducXQzK2tPY3R4TmdqTGhaYS80eGYxcTUvWWJDb2M=
- GRPC_INSECURE=TRUE
- GRPC_ADDRESS=:443
volumes:
# Retrieve non-secret config keys from the config file : https://www.pomerium.io/docs/reference/reference/
# See `config.example.yaml` and modify to fit your needs.
- ../config/config.example.yaml:/pomerium/config.yaml:ro
expose:
- 443
pomerium-cache:
image: pomerium/pomerium:latest # or `build: .` to build from source
restart: always
environment:
- SERVICES=cache
- SHARED_SECRET=aDducXQzK2tPY3R4TmdqTGhaYS80eGYxcTUvWWJDb2M=
- GRPC_INSECURE=TRUE
- GRPC_ADDRESS=:443
volumes:
# Retrieve non-secret config keys from the config file : https://www.pomerium.io/docs/reference/reference/
# See `config.example.yaml` and modify to fit your needs.
- ../config/config.example.yaml:/pomerium/config.yaml:ro
expose:
- 443
# https://httpbin.corp.beyondperimeter.com
httpbin:
image: kennethreitz/httpbin:latest
expose:
- 80
# https://hello.corp.beyondperimeter.com
hello:
image: gcr.io/google-samples/hello-app:1.0
expose:
- 8080
# Helm
- HTTPS (TLS) between client, load balancer, and services
- gRPC requests are routed behind the load balancer
- Routes default to hosted version of httpbin.org
- Includes installer script
- Pomerium serves over HTTPS, so your ingress controller may need an annotation to connect to the backend properly (an example follows below)
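For example, with ingress-nginx the backend protocol can be set with an annotation like the following (the ingress name here is illustrative; check your controller's docs for the exact key it expects):

# Tell ingress-nginx that the pomerium backend expects HTTPS
kubectl annotate ingress pomerium \
  nginx.ingress.kubernetes.io/backend-protocol=HTTPS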
# GKE
- Uses Google Kubernetes Engine's built-in ingress to do HTTPS load balancing
#!/bin/bash
# PRE-REQ: Install Helm. You should verify the content of this script before running:
# curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# NOTE! This will create real resources on Google's cloud. Make sure you clean up any unused
# resources to avoid being billed. For reference, this tutorial cost me <10 cents for a couple of hours.
# NOTE! You must change the identity provider client secret setting, and service account setting!
# NOTE! If you are using gsuite, you should also set `authenticate.idp.serviceAccount`, see docs !
echo "=> [GCE] creating cluster"
gcloud container clusters create pomerium --region us-west2 --num-nodes 1
echo "=> [GCE] get cluster credentials so we can use kubctl locally"
gcloud container clusters get-credentials pomerium --region us-west2
echo "=> add pomerium's helm repo"
helm repo add pomerium https://helm.pomerium.io
echo "=> update helm"
helm repo update
echo "=> add bitnami's helm repo"
helm repo add bitnami https://charts.bitnami.com/bitnami
echo "=> install nginx as a sample hello world app"
helm upgrade --install nginx bitnami/nginx --set service.type=ClusterIP
echo "=> install pomerium with helm"
helm install \
pomerium \
pomerium/pomerium \
--set service.type="NodePort" \
--set config.sharedSecret=$(head -c32 /dev/urandom | base64) \
--set config.cookieSecret=$(head -c32 /dev/urandom | base64) \
--set ingress.secret.name="pomerium-tls" \
--set ingress.secret.cert=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer") \
--set ingress.secret.key=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key") \
--values docs/configuration/examples/kubernetes/values.yaml
# When done, clean up by deleting the cluster!
# helm del $(helm ls --all --short) --purge # deletes all your helm instances
# gcloud container clusters delete pomerium # deletes your cluster
# Kubernetes
- Uses Google Kubernetes Engine's built-in ingress to do HTTPS load balancing
- HTTPS (TLS) between client, load balancer, and services
- gRPC requests are routed behind the load balancer
- Routes default to hosted version of httpbin.org
- Includes installer script
# kubernetes_gke
#!/bin/bash
# NOTE! This will create real resources on Google GCP. Make sure you clean up any unused
# resources to avoid being billed.
# For reference, this tutorial cost ~10 cents for a couple of hours.
# NOTE! You must change the identity provider client secret setting in your config file!
echo "=> creating cluster"
gcloud container clusters create pomerium --num-nodes 3 --region us-west2
echo "=> get cluster credentials so we can use kubctl locally"
gcloud container clusters get-credentials pomerium --region us-west2
echo "=> create config from kubernetes-config.yaml which we will mount"
kubectl create configmap config --from-file="config.yaml"="kubernetes-config.yaml"
echo "=> create our random shared-secret and cookie-secret keys as envars"
kubectl create secret generic shared-secret --from-literal=shared-secret=$(head -c32 /dev/urandom | base64)
kubectl create secret generic cookie-secret --from-literal=cookie-secret=$(head -c32 /dev/urandom | base64)
echo "=> initiliaze secrets for TLS wild card for service use"
kubectl create secret generic certificate \
--from-literal=certificate=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer")
kubectl create secret generic certificate-key \
--from-literal=certificate-key=$(base64 -i "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key")
echo "=> load TLS to ingress"
kubectl create secret tls pomerium-tls \
--key "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/*.corp.beyondperimeter.com.key" \
--cert "$HOME/.acme.sh/*.corp.beyondperimeter.com_ecc/fullchain.cer"
echo "=> deploy pomerium proxy, authorize, and authenticate"
kubectl apply -f pomerium-proxy.yml
kubectl apply -f pomerium-authenticate.yml
kubectl apply -f pomerium-authorize.yml
kubectl apply -f pomerium-cache.yml
echo "=> deploy our test app, httpbin"
kubectl apply -f httpbin.yml
echo "=> deploy the GKE specific ingress"
kubectl apply -f ingress.yml
# Alternatively, nginx-ingress can be used
# kubectl apply -f ingress.nginx.yml
# When done, clean up by deleting the cluster!
# gcloud container clusters delete pomerium
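After the install script completes (and before any cleanup), a quick check that everything deployed; the resource names come from the manifests below:

# All four pomerium deployments plus httpbin should reach Ready
kubectl get deployments,services,ingress
kubectl rollout status deployment/pomerium-proxy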
# kubernetes-config.yaml
# Main configuration flags : https://www.pomerium.io/docs/reference/reference/
insecure_server: true
grpc_insecure: true
address: ":80"
grpc_address: ":80"
authenticate_service_url: https://authenticate.corp.beyondperimeter.com
authorize_service_url: http://pomerium-authorize-service.default.svc.cluster.local
cache_service_url: http://pomerium-cache-service.default.svc.cluster.local
override_certificate_name: "*.corp.beyondperimeter.com"
idp_provider: google
idp_client_id: REPLACE_ME.apps.googleusercontent.com
idp_client_secret: "REPLACE_ME"
policy:
  - from: https://httpbin.corp.beyondperimeter.com
    to: http://httpbin.default.svc.cluster.local:8000
    allowed_domains:
      - gmail.com
# pomerium-authenticate.yml
apiVersion: v1
kind: Service
metadata:
  name: pomerium-authenticate-service
spec:
  ports:
    - port: 80
      name: http
  selector:
    app: pomerium-authenticate
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pomerium-authenticate
  labels:
    app: pomerium-authenticate
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pomerium-authenticate
  template:
    metadata:
      labels:
        app: pomerium-authenticate
    spec:
      containers:
        - image: pomerium/pomerium:master
          name: pomerium-authenticate
          args:
            - --config=/etc/pomerium/config.yaml
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
          env:
            - name: SERVICES
              value: authenticate
            - name: SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: shared-secret
                  key: shared-secret
            - name: COOKIE_SECRET
              valueFrom:
                secretKeyRef:
                  name: cookie-secret
                  key: cookie-secret
          readinessProbe:
            httpGet:
              path: /ping
              port: 80
              scheme: HTTP
          livenessProbe:
            httpGet:
              path: /ping
              port: 80
              scheme: HTTP
            initialDelaySeconds: 5
            timeoutSeconds: 1
          volumeMounts:
            - mountPath: /etc/pomerium/
              name: config
      volumes:
        - name: config
          configMap:
            name: config
# pomerium-authorize.yml
apiVersion: v1
kind: Service
metadata:
  name: pomerium-authorize-service
spec:
  ports:
    - port: 80
      name: grpc
  selector:
    app: pomerium-authorize
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pomerium-authorize
  labels:
    app: pomerium-authorize
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pomerium-authorize
  template:
    metadata:
      labels:
        app: pomerium-authorize
    spec:
      containers:
        - image: pomerium/pomerium:master
          name: pomerium-authorize
          args:
            - --config=/etc/pomerium/config.yaml
          ports:
            - containerPort: 80
              name: grpc
              protocol: TCP
          env:
            - name: SERVICES
              value: authorize
            - name: SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: shared-secret
                  key: shared-secret
          readinessProbe:
            tcpSocket:
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: 80
            initialDelaySeconds: 15
            periodSeconds: 20
          volumeMounts:
            - mountPath: /etc/pomerium/
              name: config
      volumes:
        - name: config
          configMap:
            name: config
# pomerium-proxy.yml
apiVersion: v1
kind: Service
metadata:
  name: pomerium-proxy-service
spec:
  ports:
    - port: 80
      protocol: TCP
      name: http
      targetPort: http
  selector:
    app: pomerium-proxy
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pomerium-proxy
  labels:
    app: pomerium-proxy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pomerium-proxy
  template:
    metadata:
      labels:
        app: pomerium-proxy
    spec:
      containers:
        - image: pomerium/pomerium:master
          name: pomerium-proxy
          args:
            - --config=/etc/pomerium/config.yaml
          ports:
            - containerPort: 80
              name: http
              protocol: TCP
          env:
            - name: SERVICES
              value: proxy
            - name: SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: shared-secret
                  key: shared-secret
            - name: COOKIE_SECRET
              valueFrom:
                secretKeyRef:
                  name: cookie-secret
                  key: cookie-secret
          readinessProbe:
            httpGet:
              path: /ping
              port: 80
              scheme: HTTP
          livenessProbe:
            httpGet:
              path: /ping
              port: 80
              scheme: HTTP
            initialDelaySeconds: 10
            timeoutSeconds: 1
          volumeMounts:
            - mountPath: /etc/pomerium/
              name: config
      volumes:
        - name: config
          configMap:
            name: config
# pomerium-cache.yml
apiVersion: v1
kind: Service
metadata:
  name: pomerium-cache-service
spec:
  clusterIP: None # cache is a headless service!
  ports:
    - port: 80
      name: grpc
  selector:
    app: pomerium-cache
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pomerium-cache
  labels:
    app: pomerium-cache
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pomerium-cache
  template:
    metadata:
      labels:
        app: pomerium-cache
    spec:
      containers:
        - image: pomerium/pomerium:master
          name: pomerium-cache
          args:
            - --config=/etc/pomerium/config.yaml
          ports:
            - containerPort: 80
              name: grpc
              protocol: TCP
          env:
            - name: SERVICES
              value: cache
            - name: SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: shared-secret
                  key: shared-secret
          readinessProbe:
            tcpSocket:
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: 80
            initialDelaySeconds: 15
            periodSeconds: 20
          volumeMounts:
            - mountPath: /etc/pomerium/
              name: config
      volumes:
        - name: config
          configMap:
            name: config
# ingress.yml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: pomerium-ingress
  annotations:
    kubernetes.io/ingress.allow-http: "false"
    kubernetes.io/ingress.global-static-ip-name: pomerium
spec:
  tls:
    - secretName: pomerium-tls
      hosts:
        - "*.corp.beyondperimeter.com"
        - "authenticate.corp.beyondperimeter.com"
  rules:
    - host: "*.corp.beyondperimeter.com"
      http:
        paths:
          - backend:
              serviceName: pomerium-proxy-service
              servicePort: http
    - host: "authenticate.corp.beyondperimeter.com"
      http:
        paths:
          - backend:
              serviceName: pomerium-authenticate-service
              servicePort: http
# Istio
- Istio provides mutual TLS via sidecars; to make Istio play well with Pomerium, we need to disable TLS on the Pomerium side.
- We need to provide Istio with information on how to route requests via Pomerium to their destinations.
- The following example shows how to make Grafana's auth-proxy work with Pomerium inside of an Istio mesh.
# Gateway
We are using the standard istio-ingressgateway that comes configured with Istio, and attach a Gateway to it that handles a subset of our ingress traffic based on the Host header (in this case `*.yourcompany.com`). This is the Gateway to which we will later attach VirtualServices for more granular routing decisions. Along with the Gateway, because we care about TLS, we use cert-manager to provision a self-signed certificate (see the cert-manager docs for setup instructions).
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: internal-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 443
        protocol: HTTPS
        name: https-default
      tls:
        mode: SIMPLE
        serverCertificate: "sds"
        privateKey: "sds"
        credentialName: internal-cert
      hosts:
        - "*.yourcompany.com"
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: internal-cert
  namespace: istio-system
spec:
  secretName: internal-cert
  issuerRef:
    name: self-signed-issuer
    kind: ClusterIssuer
  commonName: "*.yourcompany.com"
  dnsNames:
    - "*.yourcompany.com"
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: self-signed-issuer
spec:
  selfSigned: {}
# Virtual Services
Here we configure two VirtualServices: one to route from the Gateway to the Authenticate service, and one to route from the Gateway to the Pomerium Proxy, which will route the request to Grafana according to the configured Pomerium policy.
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: grafana-virtual-service
  namespace: pomerium
spec:
  gateways:
    - istio-system/internal-gateway
  hosts:
    - grafana.yourcompany.com
  http:
    - route:
        - destination:
            host: pomerium-proxy
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: authenticate-virtual-service
  namespace: pomerium
spec:
  gateways:
    - istio-system/internal-gateway
  hosts:
    - authenticate.yourcompany.com
  http:
    - route:
        - destination:
            host: pomerium-authenticate
# Service Entry
If you are enforcing mutual TLS in your service mesh, you will need to add a ServiceEntry for your identity provider so that Istio knows not to expect a mutual TLS connection with, for example, https://yourcompany.okta.com.
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: external-idp
  namespace: pomerium
spec:
  hosts:
    - yourcompany.okta.com
  location: MESH_EXTERNAL
  ports:
    - number: 443
      name: https
      protocol: TLS
  resolution: DNS
# Pomerium Configuration
For this example we're using the Pomerium Helm chart with the following `values.yaml` file. Things to note here are the `insecure` flag, where we disable TLS in Pomerium in favor of the Istio-provided TLS via sidecars. Also note the `extraEnv` argument, where we ask Pomerium to extract the email property from the JWT and pass it on to Grafana in a header called `X-Pomerium-Claim-Email`. We need to do this because Grafana does not know how to read the Pomerium JWT, but its auth-proxy authentication method can be configured to read user information from headers. The policy document contains a single route that sends all requests with a Host header of https://grafana.yourcompany.com to the Grafana instance running in the monitoring namespace. We disable ingress because we are using the Istio ingressgateway for ingress traffic and don't need the Pomerium Helm chart to create Ingress objects for us.
config:
  insecure: true
  policy:
    - from: https://grafana.yourcompany.com
      to: "http://prometheus-grafana.monitoring.svc.cluster.local"
      timeout: 30s
      allowed_domains:
        - yourcompany.com
ingress:
  enabled: false
extraEnv:
  JWT_CLAIMS_HEADERS: email
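These values are applied like any other chart values; a sketch, assuming the pomerium namespace used by the VirtualServices above:

# Install or upgrade Pomerium into the pomerium namespace with the values above
helm repo add pomerium https://helm.pomerium.io
helm upgrade --install pomerium pomerium/pomerium --namespace pomerium -f values.yaml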
# Grafana ini
On the Grafana side we are using the Grafana Helm chart; what follows is the relevant section of the `values.yml` file. The most important thing here is that we need to tell Grafana which request header to grab the username from. In this case that's `X-Pomerium-Claim-Email`, because we will be using the user's email (provided by your identity provider) as their username in Grafana. For all the configuration options, check out the Grafana documentation on its auth-proxy authentication method.
grafana.ini:
  users:
    allow_sign_up: false
    auto_assign_org: true
    auto_assign_org_role: Editor
  auth.proxy:
    enabled: true
    header_name: X-Pomerium-Claim-Email
    header_property: username
    auto_sign_up: true
    sync_ttl: 60
    enable_login_token: false
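These settings land in Grafana the same way; a sketch, assuming Grafana is managed by its official chart in the monitoring namespace (the repo, release, and file names here are illustrative; the route's `prometheus-grafana` service name suggests your Grafana may instead be part of a larger monitoring stack, so adjust accordingly):

# Apply the auth-proxy configuration to the Grafana release
helm repo add grafana https://grafana.github.io/helm-charts
helm upgrade --install grafana grafana/grafana --namespace monitoring -f values.yml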