kubectl create namespace transparent-proxy
kubectl create secret generic dest-svc-key -n transparent-proxy --from-literal=secret='<credentials>'
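Optionally, verify that the secret is in place before you continue with the installation (the names below match the ones created above):

kubectl get secret dest-svc-key -n transparent-proxy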
Create a values.yaml file for the Helm chart with the following content:

deployment:
  autoscaling:
    http:
      horizontal:
        # Enables or disables the Horizontal Pod Autoscaler mechanism.
        enabled: true
        # Upper limit for the number of HTTP Transparent Proxy replicas to which the autoscaler can scale up.
        maxReplicaCount: 3
        metrics:
          # Target value of the average CPU metric across all Transparent HTTP Proxy pods, represented as a percentage of the requested value of the CPU for the pods.
          cpuAverageUtilization: 80
          # Target value of the average memory metric across all Transparent HTTP Proxy pods, represented as a percentage of the requested value of the memory for the pods.
          memoryAverageUtilization: 80
    tcp:
      horizontal:
        # Enables or disables the Horizontal Pod Autoscaler mechanism.
        enabled: true
        # Upper limit for the number of TCP Transparent Proxy replicas to which the autoscaler can scale up.
        maxReplicaCount: 3
        metrics:
          # Target value of the average CPU metric across all Transparent TCP Proxy pods, represented as a percentage of the requested value of the CPU for the pods.
          cpuAverageUtilization: 80
          # Target value of the average memory metric across all Transparent TCP Proxy pods, represented as a percentage of the requested value of the memory for the pods.
          memoryAverageUtilization: 80
config:
  # Defines the tenant mode in which the Transparent Proxy works. The option "dedicated" means the proxy works in single-tenant mode.
  tenantMode: "dedicated"
  security:
    accessControl:
      destinations:
        # Defines the scope of Destination CRs.
        defaultScope: "clusterWide"
    communication:
      internal:
        # Enables/disables mTLS communication between the Transparent Proxy micro-components.
        # Disable it only in test environments or when integrating with a service mesh such as Istio.
        encryptionEnabled: true
        certManager:
          issuerRef:
            name: <cert-manager issuer name>
            kind: ClusterIssuer
          # Certificate properties used by cert-manager's Certificate controller.
          certificate:
            privateKey:
              algorithm: ECDSA
              encoding: PKCS8
              size: 256
            duration: 720h
            renewBefore: 120h
  manager:
    # The interval at which the Transparent Proxy checks for updates in the Destination service instance.
    executionIntervalMinutes: 3
  integration:
    destinationService:
      instances:
        # The local cluster name of the Destination service instance, which can later be used as a reference in the Destination CR.
        - name: dest-service-instance
          serviceCredentials:
            # The key in the Destination service secret resource, which holds the value of the Destination service key.
            secretKey: secret
            # The name of the existing secret, which holds the credentials for the Destination service.
            secretName: dest-svc-key
            # The namespace of the secret to be used, which holds the credentials for the Destination service.
            secretNamespace: transparent-proxy
    connectivityProxy:
      # The Kubernetes service name + namespace that are associated with the Connectivity Proxy workload.
      serviceName: <connectivity proxy service name>.<connectivity proxy namespace>
      # The port on which the HTTP interface of the Connectivity Proxy is started.
      httpPort: 20003
      # The port on which the TCP interface of the Connectivity Proxy is started.
      tcpPort: 20004
Install the Transparent Proxy Helm chart, referencing the values file you just created:

helm install transparent-proxy oci://registry-1.docker.io/sapse/transparent-proxy --version <version of helm chart> --namespace transparent-proxy -f <path-to-values.yaml>
You should receive a response similar to this (illustrative output; the exact values depend on your installation):
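NAME: transparent-proxy
LAST DEPLOYED: <installation timestamp>
NAMESPACE: transparent-proxy
STATUS: deployed
REVISION: 1
TEST SUITE: None

Check that the Transparent Proxy pods are up: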
kubectl get pods -n transparent-proxy
There should be two pods running; the output will look roughly like this (pod names are placeholders and will differ in your cluster):
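NAME                               READY   STATUS    RESTARTS   AGE
sap-transp-proxy-http-<hash>       1/1     Running   0          1m
sap-transp-proxy-manager-<hash>    1/1     Running   0          1m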
To verify that the installation is healthy, call the internal health check endpoint from a temporary pod:

kubectl run perform-hc --image=curlimages/curl -it --rm --restart=Never -- curl -w "\n" 'sap-transp-proxy-int-healthcheck.transparent-proxy/status'
Next, create a Destination custom resource that exposes the destinations of the configured Destination service instance, for example in a file named dynamic-destination.yaml:

apiVersion: destination.connectivity.api.sap/v1
kind: Destination
metadata:
  name: dynamic-destination
  namespace: transparent-proxy
spec:
  destinationRef:
    name: "*"
  destinationServiceInstanceName: dest-service-instance
kubectl create -f dynamic-destination.yaml
kubectl get dst dynamic-destination -n transparent-proxy -o yaml
The status section should show that the destination is configured and available:

status:
  conditions:
  - lastUpdateTime: "2024-01-11T11:56:33.605473101Z"
    message: Technical connectivity is configured. Kubernetes service with name
      dynamic-destination is created.
    reason: ConfigurationSuccessful
    status: "True"
    type: Available
kubectl run curlpod -n transparent-proxy --image=curlimages/curl -i --tty -- sh

From inside the pod, call the dynamic-destination service and pass the name of the target destination in the X-Destination-Name header:
curl dynamic-destination -H "X-Destination-Name: <destination-name>"
~ $ curl dynamic-destination/sap/rest/authorization/v2/apps -H "X-Destination-Name: xsuaa-api" -v
* Host dynamic-destination:80 was resolved.
...
> GET /sap/rest/authorization/v2/apps HTTP/1.1
> Host: dynamic-destination
> User-Agent: curl/8.5.0
> Accept: */*
> X-Destination-Name: xsuaa-api
>
< HTTP/1.1 200 OK
...
[{"appid":"auditlog!b3718","serviceinstanceid":"0889a7e7-61d8-41...
~ $ curl dynamic-destination/principal-propagation -H "X-Destination-Name: my-on-premise-system" -H "Authorization: Bearer $TOKEN" -v
* Host dynamic-destination:80 was resolved.
...
* Connected to dynamic-destination (10.104.69.106) port 80
> GET /principal-propagation HTTP/1.1
> Host: dynamic-destination
> User-Agent: curl/8.5.0
> Accept: */*
> X-Destination-Name: my-on-premise-system
> Authorization: Bearer eyJhbGciOiJSUzI1NiIsImprdS...
...
< HTTP/1.1 200 OK
...
Hello Iliyan Videnov!
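The examples above use a single wildcard Destination CR and select the concrete destination per request via the X-Destination-Name header. As an alternative, a Destination CR can reference one specific destination from the Destination service by name; the proxy then creates a Kubernetes service dedicated to that destination, so the header should not be needed. A sketch, assuming a destination named xsuaa-api exists in the dest-service-instance instance:

apiVersion: destination.connectivity.api.sap/v1
kind: Destination
metadata:
  name: xsuaa-api
  namespace: transparent-proxy
spec:
  destinationRef:
    name: "xsuaa-api"
  destinationServiceInstanceName: dest-service-instance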