snowflakeintegration.md
echo '*** Retrieving App Stack kubeconfig ***'
curl --location -o "$DISTRIBUTED_CLOUD_SITE_NAME.yaml" --request POST "https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/web/namespaces/system/sites/$DISTRIBUTED_CLOUD_SITE_NAME/global-kubeconfigs" \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'Access-Control-Allow-Origin: *' \
--header "x-volterra-apigw-tenant: $DISTRIBUTED_CLOUD_TENANT" \
--data-raw "{\"expirationTimestamp\":\"$KUBECONFIG_CERT_EXPIRE_DATE\"}"
echo '*** Setting KUBECONFIG environment variable ***'
export KUBECONFIG=$DISTRIBUTED_CLOUD_SITE_NAME.yaml
echo '*** Adding f5businessdevelopment helm repository ***'
helm repo add f5businessdevelopment https://f5businessdevelopment.github.io/helm-charts
helm repo update f5businessdevelopment
echo '*** Deploying frps application on App Stack ***'
kubectl create namespace $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE
helm -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE install \
--set-file caCert=certs/ca.crt \
--set-file serverCert=certs/server.crt \
--set-file serverKey=certs/server.key \
--set bindPort=$FRPS_CONTROL_LISTENER_PORT \
--set vhostHTTPPort=$FRPS_HTTP_LISTENER_PORT \
--set vhostHTTPSPort=$FRPS_HTTPS_LISTENER_PORT \
--set authToken=$FRPS_AUTH_TOKEN \
$FRPS_INSTANCE_NAME f5businessdevelopment/f5xc-frps --version 0.0.4
echo '*** Creating origin pool for control port ***'
cat templatecontroloriginpool.json | envsubst | tr -d '\n' | \
curl --request POST \
--url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/origin_pools \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'content-type: application/json' \
--data @-
echo '*** Creating origin pool for http port ***'
cat templatehttporiginpool.json | envsubst | tr -d '\n' | \
curl --request POST \
--url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/origin_pools \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'content-type: application/json' \
--data @-
echo '*** Creating loadbalancer for control port ***'
cat templatetcploadbalancer.json | envsubst | tr -d '\n' | \
curl --request POST \
--url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/tcp_loadbalancers \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'content-type: application/json' \
--data @-
echo '*** Creating load balancer for http port ***'
cat templatehttploadbalancer.json | envsubst | tr -d '\n' | \
curl --request POST \
--url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/http_loadbalancers \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'content-type: application/json' \
--data @-
echo 'TO DO: check on readiness status of control load balancers'
echo '*** Checking readiness of http load balancer ***'
for x in `seq 1 120`; do
site_state=$(curl --location --request GET https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/http_loadbalancers/$FRPS_INSTANCE_NAME-f5xc-frps-https -H "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" -H "content-type: application/json" -s |jq -r .spec.state)
if [ "$site_state" = "VIRTUAL_HOST_READY" ]; then
echo "$site_state: $FRPS_INSTANCE_NAME is ready. Safe to proceed. [$x minutes elapsed]"
echo 'Ready to serve requests from frpc client applications'
exit 0
else
echo "$site_state: wait for $FRPS_INSTANCE_NAME to be VIRTUAL_HOST_READY before proceeding. [$x minutes elapsed]"
#exit 1
fi
sleep 60;
done;
echo "$site_state: wait for $FRPS_INSTANCE_NAME to be ONLINE before proceeding; timed out after 120 minutes"

Integrating an application running in Snowflake's Snowpark Container Services with F5 Distributed Cloud

Overview

Prerequisites

Set up FRP server on App Stack

Create App Stack instance on F5 Distributed Cloud tenant

Create App Stack site

Gather input data

# The tenant name is found at the beginning of the URL when accessing
# the F5 Distributed Cloud console (e.g. *tenantname*.console.ves.volterra.io)
export DISTRIBUTED_CLOUD_TENANT=mytenantname
# find tenant id in the F5 Distributed Cloud GUI at
# Account -> Account Settings -> Tenant Overview -> Tenant ID
export DISTRIBUTED_CLOUD_TENANT_ID=mytenantnamewithextensionfoundintheconsole
# create an API token in the F5 Distributed Cloud GUI at
# Account -> Account Settings -> Credentials -> Add Credentials 
# set Credential Type to API Token, not API Certificate
export DISTRIBUTED_CLOUD_API_TOKEN=myapitoken
# the name of the App Stack site created in the step above
export DISTRIBUTED_CLOUD_SITE_NAME=appstacksitename
# the namespace used in the distributed cloud console for the 
# load balancers and origin pools you will create below
export DISTRIBUTED_CLOUD_NAMESPACE=mydistributedcloudnamespace
# the kubernetes namespace where the frp server deployment, 
# services, and pod will be installed
export DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE=frps
# the App Stack site name referenced by the origin pool templates below;
# typically the same value as DISTRIBUTED_CLOUD_SITE_NAME above
export DISTRIBUTED_CLOUD_APP_STACK_SITE=myappstacksite
# the DNS zone created as part of the prerequisites
export DISTRIBUTED_CLOUD_DNS_ZONE=xc.f5demo.net
# the port used by the frp server for control 
# this port will be behind a tcp load balancer
export FRPS_CONTROL_LISTENER_PORT=7443
# the port used by the frp server to serve https traffic
# this port will be behind an http load balancer
export FRPS_HTTPS_LISTENER_PORT=8443
# the port used by the frp server to serve http traffic
# this port will be behind an http load balancer
export FRPS_HTTP_LISTENER_PORT=8080
# the token used by the frp client to authenticate with the 
# frp server
export FRPS_AUTH_TOKEN=123456789ABCDEF
# the instance name is used to uniquely name distributed cloud, 
# helm, and kubernetes artifacts created as part of the frp server
# installation
export FRPS_INSTANCE_NAME=myfrps
# used in concert with the DISTRIBUTED_CLOUD_DNS_ZONE to create the 
# public hostname for the https application end point served
# by distributed cloud
export FRPS_APP_NAME=mycleverapp
# adjust the expiry date to a time no more than 90 days in the future
export KUBECONFIG_CERT_EXPIRE_DATE=$(date -d "90 days" +%Y-%m-%dT%H:%M:%S.%NZ)
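
The date command above uses GNU date syntax. On macOS, where the default date is the BSD variant, a sketch of an equivalent (assuming the API accepts a timestamp without fractional seconds):

# BSD date has no -d flag; -v adjusts the date instead
export KUBECONFIG_CERT_EXPIRE_DATE=$(date -u -v+90d +%Y-%m-%dT%H:%M:%SZ)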

Get the kube config for the App Stack cluster

Retrieve the kube config file for your App Stack cluster, using the F5 Distributed Cloud REST API.

curl --location -o "$DISTRIBUTED_CLOUD_SITE_NAME.yaml" --request POST "https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/web/namespaces/system/sites/$DISTRIBUTED_CLOUD_SITE_NAME/global-kubeconfigs" \
--header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
--header 'Access-Control-Allow-Origin: *' \
--header "x-volterra-apigw-tenant: $DISTRIBUTED_CLOUD_TENANT" \
--data-raw "{\"expirationTimestamp\":\"$KUBECONFIG_CERT_EXPIRE_DATE\"}"

Note: the kubeconfig is saved to a YAML file named after your site ($DISTRIBUTED_CLOUD_SITE_NAME.yaml). If you want the file saved in another location, change the argument of the output flag (-o).
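
A quick way to confirm the kubeconfig works is to list namespaces on the App Stack cluster (a minimal check; assumes kubectl is installed locally):

kubectl --kubeconfig "$DISTRIBUTED_CLOUD_SITE_NAME.yaml" get namespaces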

Deploy FRP server using Helm

Set the KUBECONFIG environment variable to the path for the kubeconfig file you downloaded in the previous step.

export KUBECONFIG=/path/to/yaml/kubeconfig/downloaded/earlier

Set up the helm repository for the charts used in this example.

helm repo add f5businessdevelopment https://f5businessdevelopment.github.io/helm-charts
helm repo update f5businessdevelopment
helm search repo f5businessdevelopment --versions

If you've already added the f5businessdevelopment helm chart repository, helm repo add may report that it already exists. You can ignore this message and continue with helm repo update.

Create the namespace and deploy the application.

kubectl create namespace $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE 

helm -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE install --dry-run=client \
     --set-file caCert=certs/ca.crt \
     --set-file serverCert=certs/server.crt \
     --set-file serverKey=certs/server.key \
     --set bindPort=$FRPS_CONTROL_LISTENER_PORT \
     --set vhostHTTPPort=$FRPS_HTTP_LISTENER_PORT \
     --set vhostHTTPSPort=$FRPS_HTTPS_LISTENER_PORT \
     --set authToken=$FRPS_AUTH_TOKEN \
     $FRPS_INSTANCE_NAME f5businessdevelopment/f5xc-frps --version 0.0.4

Note: The helm install above includes the --dry-run=client flag. Inspect the results of the dry run before running helm install again without the --dry-run=client flag.

At this point, if you haven't seen any errors, the frp server pod and service should be deployed in the Kubernetes namespace you created earlier. A brief verification can be done with the following commands.

kubectl -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE get deploy
kubectl -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE get pods
kubectl -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE get svc
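
If the pod is not running, its logs are a good first place to look. A minimal sketch, assuming the chart names the deployment $FRPS_INSTANCE_NAME-f5xc-frps (matching the service name used in the origin pool templates at the end of this gist):

kubectl -n $DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE logs deploy/$FRPS_INSTANCE_NAME-f5xc-frps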

Next, we need to expose the frp server's endpoints so that it can accept frp client connections and serve application content.

Configure control channel TCP load balancer

The following steps configure a TCP load balancer in front of the frp server's control channel, along with an HTTP load balancer and origin pools for application traffic.

Create templatecontroloriginpool.json with the content shown at the end of this gist.

Create templatetcploadbalancer.json with the content shown at the end of this gist.

Create templatehttporiginpool.json with the content shown at the end of this gist.

You will also need templatehttploadbalancer.json, which is used in the HTTP load balancer step below.

Create the origin pool for the control port, using the F5 Distributed Cloud REST API. You may want to explore the output of cat templatecontroloriginpool.json | envsubst before running the entire command.
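
For example, to preview the rendered JSON (assuming jq is available for pretty-printing):

cat templatecontroloriginpool.json | envsubst | jq .

Then run the full command: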

cat templatecontroloriginpool.json | envsubst  | \
curl --request POST \
  --url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/origin_pools \
  --header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
  --header 'content-type: application/json' \
  --data  @-

Create the origin pool for the http port, using the F5 Distributed Cloud REST API. You may want to explore the output of cat templatehttporiginpool.json | envsubst before running the entire command.

cat templatehttporiginpool.json | envsubst | \
curl --request POST \
  --url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/origin_pools \
  --header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
  --header 'content-type: application/json' \
  --data  @-

Create the tcp load balancer for the control port, using the F5 Distributed Cloud REST API. You may want to explore the output of cat templatetcploadbalancer.json | envsubst before running the entire command.

cat templatetcploadbalancer.json | envsubst | \
curl --request POST \
  --url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/tcp_loadbalancers \
  --header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
  --header 'content-type: application/json' \
  --data @-

Create the http load balancer for the http port, using the F5 Distributed Cloud REST API. You may want to explore the output of cat templatehttploadbalancer.json | envsubst before running the entire command.

cat templatehttploadbalancer.json | envsubst | \
curl --request POST \
  --url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/http_loadbalancers \
  --header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
  --header 'content-type: application/json' \
  --data @-
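
Before connecting an frp client, you can poll the HTTP load balancer's state until it reports VIRTUAL_HOST_READY, as the consolidated script at the top of this gist does. A one-shot check (assuming the load balancer is named $FRPS_INSTANCE_NAME-f5xc-frps-https, as in that script):

curl -s --request GET \
  --url https://$DISTRIBUTED_CLOUD_TENANT.console.ves.volterra.io/api/config/namespaces/$DISTRIBUTED_CLOUD_NAMESPACE/http_loadbalancers/$FRPS_INSTANCE_NAME-f5xc-frps-https \
  --header "Authorization: APIToken $DISTRIBUTED_CLOUD_API_TOKEN" \
  --header 'content-type: application/json' | jq -r .spec.state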

Configure Snowflake

Set up Snowflake Warehouse

The API needs a warehouse to query the data it returns to the caller. To create the warehouse, connect to Snowflake and run the following commands in the Snowflake console or using SnowSQL:

USE ROLE ACCOUNTADMIN;
CREATE WAREHOUSE DATA_API_WH WITH WAREHOUSE_SIZE='xsmall';

Create Application Role

The application will run as a new role with minimal privileges. To create the role, connect to Snowflake and run the following SQL statements to create the role and grant it access to the data needed for the application.

USE ROLE ACCOUNTADMIN;
CREATE ROLE DATA_API_ROLE;

GRANT USAGE ON WAREHOUSE DATA_API_WH TO ROLE DATA_API_ROLE;
GRANT IMPORTED PRIVILEGES ON DATABASE SNOWFLAKE_SAMPLE_DATA TO ROLE DATA_API_ROLE;

GRANT ROLE DATA_API_ROLE TO ROLE ACCOUNTADMIN;

Get a sample application and container image

TBD

Create image repository

To create the image repository and the database that contains it, connect to Snowflake and run the following commands in the Snowflake console or using SnowSQL:

USE ROLE ACCOUNTADMIN;
CREATE DATABASE API;

GRANT ALL ON DATABASE API TO ROLE DATA_API_ROLE;
GRANT ALL ON SCHEMA API.PUBLIC TO ROLE DATA_API_ROLE;

USE DATABASE API;
CREATE OR REPLACE IMAGE REPOSITORY API;

GRANT READ ON IMAGE REPOSITORY API TO ROLE DATA_API_ROLE;

SHOW IMAGE REPOSITORIES;

Note the repository_url in the SHOW IMAGE REPOSITORIES output, as it will be needed in the next step.
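
It can be convenient to keep the repository URL in an environment variable for the docker commands that follow. The value below is a hypothetical placeholder; substitute the repository_url from your own SHOW IMAGE REPOSITORIES output:

export SNOWFLAKE_REPOSITORY_URL=myorg-myaccount.registry.snowflakecomputing.com/api/public/api

You can then use $SNOWFLAKE_REPOSITORY_URL in place of <repository_url> in the commands below.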

Push image to the repository

Run the following commands in the terminal (for example, in Codespaces), replacing <repository_url> with the repository URL from the previous step, to log in to the container repository, build the image, and push it. You will be prompted for your Snowflake username and password. Note: the author had to disable Snowflake MFA to use the docker commands below.

docker login <repository_url>
docker build -t <repository_url>/dataapi .
docker push <repository_url>/dataapi

Push the frp client image to the Snowflake image repository

Note: Snowflake's Snowpark Container Services does not allow the use of remote image repositories. Consequently, you'll have to pull the frp client image to an intermediate workstation and then push it from there to the Snowpark image repository you created earlier.

docker pull ghcr.io/f5businessdevelopment/f5xc-frpc:0.0.5
docker tag ghcr.io/f5businessdevelopment/f5xc-frpc:0.0.5 <repository_url>/f5xc-frpc:0.0.5
docker push <repository_url>/f5xc-frpc:0.0.5

Create the compute pool

To create the compute pool to run the application, connect to Snowflake and run the following command in the Snowflake console or using SnowSQL:

USE ROLE ACCOUNTADMIN;

CREATE COMPUTE POOL API
  MIN_NODES = 1
  MAX_NODES = 5
  INSTANCE_FAMILY = CPU_X64_XS;

GRANT USAGE ON COMPUTE POOL API TO ROLE DATA_API_ROLE;
GRANT MONITOR ON COMPUTE POOL API TO ROLE DATA_API_ROLE;

Add FRP client to application specification in Snowpark Container Services

Create templatesfappdeploy.sql with the content below.

USE ROLE ACCOUNTADMIN;
CREATE OR REPLACE NETWORK RULE F5XC_OUT TYPE=HOST_PORT MODE = EGRESS VALUE_LIST=('$FRPS_INSTANCE_NAME.$DISTRIBUTED_CLOUD_DNS_ZONE:$FRPS_CONTROL_LISTENER_PORT');

CREATE OR REPLACE EXTERNAL ACCESS INTEGRATION F5XC
ALLOWED_NETWORK_RULES = (F5XC_OUT) ENABLED = TRUE;

GRANT USAGE ON INTEGRATION F5XC TO ROLE DATA_API_ROLE;
DROP SERVICE IF EXISTS API.PUBLIC.API;

USE ROLE DATA_API_ROLE;
CREATE SERVICE API.PUBLIC.API
IN COMPUTE POOL API
FROM SPECIFICATION 
$$
spec:
  containers:
  - name: api
    image: /api/public/api/dataapi:latest
    resources:                          
      requests:
        cpu: 0.5
        memory: 128M
      limits:
        cpu: 1
        memory: 256M
  - name: frpc
    image: /api/public/api/f5xc-frpc:latest
    env:
      FRP_SERVER_ADDR: $FRPS_INSTANCE_NAME.$DISTRIBUTED_CLOUD_DNS_ZONE
      FRP_SERVER_PORT: $FRPS_CONTROL_LISTENER_PORT
      FRP_AUTH_TOKEN: $FRPS_AUTH_TOKEN
      FRP_PROXY_NAME: scp_web_app
      FRP_APP_ADDR: api
      FRP_APP_PORT: 8001
      FRP_PROXY_DOMAIN: $FRPS_INSTANCE_NAME-f5xc-frp.$DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE   
    resources:                          
      requests:
        cpu: 0.5
        memory: 128M
      limits:
        cpu: 1
        memory: 256M      
  endpoint:
  - name: api
    port: 8001
    public: true
$$
QUERY_WAREHOUSE = DATA_API_WH
EXTERNAL_ACCESS_INTEGRATIONS = (F5XC);

-- the status output includes both the api and frpc containers
CALL SYSTEM$GET_SERVICE_STATUS('API.PUBLIC.API');

Generate the sql

cat templatesfappdeploy.sql | envsubst > sfappdeploy.sql

Use the generated content in your Snowflake worksheet.
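
Alternatively, if you have SnowSQL installed and configured for your account, you can run the generated file directly from the command line:

snowsql -f sfappdeploy.sql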

templatecontroloriginpool.json

{
"metadata": {
"name": "$FRPS_INSTANCE_NAME-f5xc-frps-control",
"namespace": "$DISTRIBUTED_CLOUD_NAMESPACE",
"labels": {},
"annotations": {},
"description": "",
"disable": false
},
"spec": {
"origin_servers": [
{
"k8s_service": {
"service_name": "$FRPS_INSTANCE_NAME-f5xc-frps.$DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE",
"site_locator": {
"site": {
"tenant": "$DISTRIBUTED_CLOUD_TENANT_ID",
"namespace": "system",
"name": "$DISTRIBUTED_CLOUD_APP_STACK_SITE"
}
},
"inside_network": {}
},
"labels": {}
}
],
"no_tls": {},
"port": $FRPS_CONTROL_LISTENER_PORT,
"same_as_endpoint_port": {},
"healthcheck": [],
"loadbalancer_algorithm": "LB_OVERRIDE",
"endpoint_selection": "LOCAL_PREFERRED",
"advanced_options": null
}
}

templatehttporiginpool.json

{
"metadata": {
"name": "$FRPS_INSTANCE_NAME-f5xc-frps-http",
"namespace": "$DISTRIBUTED_CLOUD_NAMESPACE",
"labels": {},
"annotations": {},
"description": "",
"disable": false
},
"spec": {
"origin_servers": [
{
"k8s_service": {
"service_name": "$FRPS_INSTANCE_NAME-f5xc-frps.$DISTRIBUTED_CLOUD_APP_STACK_NAMESPACE",
"site_locator": {
"site": {
"tenant": "$DISTRIBUTED_CLOUD_TENANT_ID",
"namespace": "system",
"name": "$DISTRIBUTED_CLOUD_APP_STACK_SITE"
}
},
"inside_network": {}
},
"labels": {}
}
],
"no_tls": {},
"port": $FRPS_HTTP_LISTENER_PORT,
"same_as_endpoint_port": {},
"healthcheck": [],
"loadbalancer_algorithm": "LB_OVERRIDE",
"endpoint_selection": "LOCAL_PREFERRED",
"advanced_options": null
}
}

templatetcploadbalancer.json

{
"metadata": {
"name": "$FRPS_INSTANCE_NAME-f5xc-frps-control",
"namespace": "$DISTRIBUTED_CLOUD_NAMESPACE",
"labels": {},
"annotations": {},
"disable": false
},
"spec": {
"domains": [
"$FRPS_INSTANCE_NAME.$DISTRIBUTED_CLOUD_DNS_ZONE"
],
"listen_port": $FRPS_CONTROL_LISTENER_PORT,
"no_sni": {},
"dns_volterra_managed": true,
"origin_pools": [],
"origin_pools_weights": [
{
"pool": {
"tenant": "$DISTRIBUTED_CLOUD_TENANT_ID",
"namespace": "$DISTRIBUTED_CLOUD_NAMESPACE",
"name": "$FRPS_INSTANCE_NAME-f5xc-frps-control",
"kind": "origin_pool"
},
"weight": 1,
"priority": 1,
"endpoint_subsets": {}
}
],
"advertise_on_public_default_vip": {},
"hash_policy_choice_round_robin": {},
"idle_timeout": 3600000,
"retract_cluster": {},
"tcp": {},
"service_policies_from_namespace": {},
"auto_cert_info": {
"auto_cert_state": "AutoCertNotApplicable",
"dns_records": []
},
"downstream_tls_certificate_expiration_timestamps": [],
"internet_vip_info": [],
"cert_state": "AutoCertNotApplicable"
}
}