Conductor Documentation

Installing Containerized Wind River Conductor (Helm)

Overview

Use the following steps to deploy a Conductor Manager to Kubernetes with a Helm chart.

Configure Conductor Helm values

  1. Get the wind-river-conductor-24.3.0.tgz chart file from the licensed Wind River registry.
  2. Create an override-values.yaml as described in “Configuration options of Wind River Conductor-services values.yaml” below and customize the fields as required.
    2.1. To run on a WRCP host, for example, you need to set all occurrences of storageClass and mgmtworker.resources.pvc.class in that file to cephfs.
  3. To be able to access the Conductor UI and external CLI, configure the Ingress service.

Configure Ingress service

Ingress without hostname (Nginx example)

To access the Conductor UI and CLI without a hostname, directly using the IP address where it is installed, use the following:

# override-values.yaml
ingress:
  enabled: true
  ingressClassName: nginx

Ingress with hostname (Nginx example)

To expose Conductor through a domain hostname, make sure the hostname is registered on the network’s DNS server or add it to the system’s hosts file.

# override-values.yaml
ingress:
  enabled: true
  host: mydomain-conductor.com
  ingressClassName: nginx

# /etc/hosts Linux example
10.10.100.50  mydomain-conductor.com

Expose Ingress for WRCP Systems (Nginx)

After following the previous section, it is necessary to create a Global Network Policy to allow traffic on HTTP port 80. For HTTPS access, the user must set up production-ready certificates and customize the Conductor Helm values.

Create the following global-networkpolicy.yaml on the WRCP controller host and run kubectl apply -f global-networkpolicy.yaml. This will allow ingress traffic through the OAM IP address.

# This rule opens up default HTTP port 80
# To apply use:
# kubectl apply -f global-networkpolicy.yaml
apiVersion: crd.projectcalico.org/v1
kind: GlobalNetworkPolicy
metadata:
  name: gnp-oam-overrides
spec:
  ingress:
  - action: Allow
    destination:
      ports:
      - 80
    protocol: TCP
  order: 500
  selector: has(iftype) && iftype == 'oam'
  types:
  - Ingress

Example of override-values.yaml

# use these values override to run the wind-river-conductor chart

composer_backend:
  image: <registry-link>/composer-backend:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

composer_frontend:
  image: <registry-link>/composer-frontend:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

execution_scheduler:
  image: <registry-link>/execution-scheduler:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

mgmtworker:
  image: <registry-link>/mgmtworker:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []
  resources:
    pvc:
      name: "mgmtworker-pvc"
      size: "10Gi"
      modes:
        - "ReadWriteOnce"
      class: ""

rest_service:
  image: <registry-link>/restservice:latest
  config:
    manager:
      security:
        # When changing these credentials, must set into wrc_endpoint_secret.data.username and .password
        # or make sure to use these anchors
        admin_username: &wrc_admin_user admin
        admin_password: &wrc_admin_pass admin
  curl_image: <registry-link>/alpine/curl:8.5.0
  bind_host: "[::]"
  affinity: {}
  nodeSelector: {}
  tolerations: []

api_service:
  image: <registry-link>/apiservice:latest
  bind_host: "[::]"
  affinity: {}
  nodeSelector: {}
  tolerations: []

stage_backend:
  image: <registry-link>/stage-backend:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

stage_frontend:
  image: <registry-link>/stage-frontend:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

wrc_endpoint_secret:
  # Must match rest_service.config.manager.security.admin_username and .admin_password values or aliases
  data:
    username: *wrc_admin_user
    password: *wrc_admin_pass

system_inventory_manager:
  image: <registry-link>/system-inventory-manager:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

upgrade_group_manager:
  image: <registry-link>/upgrade-group-manager:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

wrc_secret:
  image: <registry-link>/wrc-secret-operator:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []

rest_api_server:
  image: <registry-link>/rest-api-app:latest
  affinity: {}
  nodeSelector: {}
  tolerations: []
  bind_host: "::"
  service:
    name: "rest-api-server"
    type: ClusterIP

nginx:
  image: <registry-link>/nginxinc/nginx-unprivileged:1.25.4
  replicas: 1
  affinity: {}
  nodeSelector: {}
  tolerations: []

seaweedfs:
  master:
    imageOverride: <registry-link>/seaweedfs:3.55
    data:
      storageClass: ""
    ## Must be multi-line string, not map/array type values
    # affinity: ""
    # nodeSelector: ""
    # tolerations: ""
  filer:
    imageOverride: <registry-link>/seaweedfs:3.55
    data:
      storageClass: ""
    ## Must be multi-line string, not map/array type values
    # affinity: ""
    # nodeSelector: ""
    # tolerations: ""
  s3:
    imageOverride: <registry-link>/seaweedfs:3.55
    ## Must be multi-line string, not map/array type values
    # nodeSelector: ""
    # tolerations: ""
  volume:
    imageOverride: <registry-link>/seaweedfs:3.55
    data:
      storageClass: ""
    ## Must be multi-line string, not map/array type values
    # affinity: ""
    # nodeSelector: ""
    # tolerations: ""
  clientImage: <registry-link>/aws-cli:2.13.9

prometheus:
  alertmanager:
    image:
      registry: ""
      repository: <registry-link>/alertmanager
      tag: 0.25.0-debian-11-r62
  server:
    persistence:
      storageClass: ""
      accessModes:
        - ReadWriteOnce
      size: 8Gi
    image:
      registry: ""
      repository: <registry-link>/prometheus
      tag: 2.45.0-debian-11-r0
    thanos:
      image:
        registry: ""
        repository: <registry-link>/thanos
        tag: 0.31.0-scratch-r8
    nodeSelector: {}
    tolerations: []
    affinity: {}
  volumePermissions:
    image:
      registry: ""
      repository: <registry-link>/bitnami-shell
      tag: 11-debian-11-r130

rabbitmq:
  image:
    registry: ""
    repository: <registry-link>/rabbitmq
    tag: 3.12.2-debian-11-r8
  persistence:
    storageClass: ""
    accessModes:
      - ReadWriteOnce
    size: 8Gi
  volumePermissions:
    image:
      registry: ""
      repository: <registry-link>/os-shell
      tag: 11-debian-11-r16
  affinity: {}
  nodeSelector: {}
  tolerations: []

postgresql:
  image:
    registry: ""
    repository: <registry-link>/postgresql
    tag: 15.3.0-debian-11-r17
  primary:
    persistence:
      storageClass: ""
      accessModes:
        - ReadWriteOnce
      size: 8Gi
    affinity: {}
    nodeSelector: {}
    tolerations: []
  readReplicas: # ignored if architecture != "replication"; default is "standalone"
    persistence:
      storageClass: ""
      accessModes:
        - ReadWriteOnce
      size: 8Gi
    affinity: {}
    nodeSelector: {}
    tolerations: []
  volumePermissions:
    image:
      registry: ""
      repository: <registry-link>/bitnami-shell
      tag: 11-debian-11-r130
  metrics:
    image:
      registry: ""
      repository: <registry-link>/postgres-exporter
      tag: 0.13.1-debian-11-r0

kube-state-metrics:
  image:
    registry: ""
    repository: <registry-link>/kube-state-metrics
    tag: 2.9.2-debian-11-r14
  affinity: {}
  nodeSelector: {}
  tolerations: []

ingress:
  enabled: true
  host:
  ingressClassName: nginx
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: 100m
  # TLS settings
  tls: false
  secretName: cfy-secret-name

# These files should be provided by a HTTP/File server
resources:
  packages:
    agents:
      manylinux-x86_64-agent.tar.gz: <artifactory-link>/manylinux-x86_64-agent_7.0.0-ga.tar.gz
      manylinux-aarch64-agent.tar.gz: <artifactory-link>/manylinux-aarch64-agent_7.0.0-ga.tar.gz
      cloudify-windows-agent.exe: <artifactory-link>/cloudify-windows-agent_7.0.0-ga.exe



Note: When using a non-default username/password, make sure the rest_service.config.manager.security.admin_username and .admin_password values match wrc_endpoint_secret.data.username and .password.

Install Conductor Helm application

After configuring override-values.yaml file, run the following command to install:

helm install wind-river-conductor -f ./override-values.yaml ./wind-river-conductor-24.3.0.tgz

The installation is complete when all pods are running as shown in the example below.
If the Ingress service is configured correctly, Conductor UI should be available at http://<helm-host-ip>/console

kubectl get pods
NAME                                       READY     STATUS     RESTARTS     AGE
api-service-64d4fb5f54-2ptgs               1/1       Running    0            16m
composer-backend-774467bb95-797dw          1/1       Running    3            16m
composer-frontend-7f7bddc878-8jxdw         1/1       Running    0            16m
execution-scheduler-9cfdbd977-nvvpf        1/1       Running    0            16m
kube-state-metrics-7454f94b6b-nwc2w        1/1       Running    0            16m
mgmtworker-0                               1/1       Running    2            16m
nginx-f7df98d99-2h2pd                      1/1       Running    0            16m
postgresql-0                               2/2       Running    0            16m
prometheus-server-5d47545645-584tf         1/1       Running    0            16m
rabbitmq-0                                 1/1       Running    0            16m
rest-api-server-85c79ccfb6-xvg2c           1/1       Running    0            16m
rest-service-555c864896-gmrs4              1/1       Running    0            16m
seaweedfs-filer-0                          1/1       Running    0            16m
seaweedfs-master-0                         1/1       Running    0            16m
seaweedfs-s3-68b88dd7bd-gpnvg              1/1       Running    0            16m
seaweedfs-volume-0                         1/1       Running    0            16m
stage-backend-d9bdc6567-flcdk              1/1       Running    3            16m
stage-frontend-58d4b575b4-njh2n            1/1       Running    0            16m
system-inventory-manager-7479cc894-8wqwb   1/1       Running    0            16m
upgrade-group-manager-764d4b8598-lhcpt     1/1       Running    0            16m
wrc-secret-operator-7dd5d78b46-xx7bk       1/1       Running    0            16m

Accessing Conductor external CLI

Install the package using the Python package manager, optionally in a virtualenv:

# virtualenv Python 3.11
(venv)> pip install --upgrade pip
(venv)> pip install "urllib3<2" cloudify==7.0.3
(venv)> cfy --version
Cloudify CLI 7.0.3

(venv)> cfy profiles use 10.10.100.50 -u admin -p admin
Attempting to connect to 10.10.100.50 through port 80, using http (SSL mode: False)...
Using manager 10.10.100.50 with port 80
Initializing profile 10.10.100.50...
Initialization completed successfully
It is highly recommended to have more than one manager in a Cloudify cluster
Adding cluster node localhost to local profile manager cluster
Adding cluster node rabbitmq to local profile broker cluster
Profile is up to date with 2 nodes

(venv)> cfy profiles list
Listing all profiles...

Profiles:
+----------------+---------------+------------------+----------------+----------+---------+----------+--------------+-----------+---------------+------------------+
|      name      |   manager_ip  | manager_username | manager_tenant | ssh_user | ssh_key | ssh_port | kerberos_env | rest_port | rest_protocol | rest_certificate |
+----------------+---------------+------------------+----------------+----------+---------+----------+--------------+-----------+---------------+------------------+
| *10.10.100.50  | 10.10.100.50  |      admin       |                |          |         |    22    |    False     |     80    |      http     |                  |
+----------------+---------------+------------------+----------------+----------+---------+----------+--------------+-----------+---------------+------------------+

(venv)> cfy status
Retrieving manager services status... [ip=10.10.100.50]

Services:
+--------------------------------+--------+
|            service             | status |
+--------------------------------+--------+
| Webserver                      | Active |
| Management Worker              | Active |
| Manager Rest-Service           | Active |
| Cloudify API                   | Active |
| Cloudify Execution Scheduler   | Active |
| Cloudify Console               | Active |
| PostgreSQL                     | Active |
| RabbitMQ                       | Active |
| Cloudify Composer              | Active |
| Monitoring Service             | Active |
+--------------------------------+--------+

Uninstalling Helm

To uninstall the Conductor Helm release, use the following command:

helm uninstall wind-river-conductor

The uninstall is complete when all pods have been deleted.

Note:

After uninstalling the Helm release, PersistentVolumeClaims (PVCs) and PersistentVolumes (PVs) created by the SeaweedFS, RabbitMQ, PostgreSQL and WRC MGMTWORKER StatefulSets will not be deleted.

NAME                                               STATUS    VOLUME                                     CAPACITY   ACCESSMODE     STORAGECLASS   AGE
data-default-seaweedfs-master-0                    Bound     pvc-0056baff-d819-460b-9ac4-bab4bbe2490a   1Gi        RWO            local-path     10m
data-filer-seaweedfs-filer-0                       Bound     pvc-fc612b89-5f47-4105-8b4d-388133cb05b1   1Gi        RWO            local-path     10m
data-postgresql-primary-0                          Bound     pvc-fd7545fa-a662-481c-8f07-7472870322d9   8Gi        RWO            local-path     10m
seaweedfs-master-log-volume-seaweedfs-master-0     Bound     pvc-5f0d868a-bb5f-4a7d-ac2d-e2ec7bc8535d   1Gi        RWO            local-path     10m
data-rabbitmq-0                                    Bound     pvc-bac34ffe-849c-4f01-b074-6d9ad3425952   8Gi        RWO            local-path     10m
data-seaweedfs-volume-0                            Bound     pvc-5dc884fd-eafe-4b25-838e-ca729cb5c31d   10Gi       RWO            local-path     10m
seaweedfs-filer-log-volume-seaweedfs-filer-0       Bound     pvc-9d0ef1f8-5cf9-4211-8add-4ae19b2e0375   1Gi        RWO            local-path     10m
logs-seaweedfs-volume-0                            Bound     pvc-91b9eed0-b113-4604-8d6b-b7783c0e84bf   1Gi        RWO            local-path     10m
data-postgresql-read-0                             Bound     pvc-25557a04-b38d-4215-a3d0-eff18d8e16a0   8Gi        RWO            local-path     10m
mgmtworker-pvc-mgmtworker-0                        Bound     pvc-57f770bd-cead-4c1d-a130-8f2ce9c4560a   10Gi       RWO            local-path     10m
data-rabbitmq-1                                    Bound     pvc-b30c3250-65ca-46a8-99d4-50045edeb80c   8Gi        RWO            local-path     9m52s
mgmtworker-pvc-mgmtworker-1                        Bound     pvc-6350253d-566e-432e-82c4-4fcda5742880   10Gi       RWO            local-path     6m39s 

Bitnami’s charts and the WRC MGMTWorker include a StatefulSet which uses VolumeClaimTemplates to generate new Persistent Volume Claims (PVCs) for each replica created; Helm does not track those PVCs. Therefore, when uninstalling a chart release with these characteristics, the PVCs (and associated Persistent Volumes) are not removed from the cluster. This is a known issue; see https://github.com/helm/helm/issues/5156 / https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#persistence-volumes-pvs-retained-from-previous-releases

These PVCs/PVs can be re-used if the Helm deployment is performed again, or they can be cleaned up manually as a post-removal step if Conductor is to be removed permanently or a full cleanup is necessary.

To delete these volumes, use: kubectl -n <pvc_namespace> delete pvc <pvc_name>

Using IPv6

When deploying on an IPv6-only cluster, additional settings must be applied to make RabbitMQ compatible. See values-ipv6.yaml for the changed parameters, or use this values file directly when installing the chart. Remember that when using an IPv6 cluster, the ingress controller must support IPv6 as well.

rabbitmq:
  initContainers:
    - name: ipv6-init
      image: "<registry-link>/busybox:1.33.1"
      imagePullPolicy: IfNotPresent
      volumeMounts:
      - name: ipv6-cfg
        mountPath: /ipv6
      command: ['sh', '-c', 'echo "{inet6, true}." > /ipv6/erl_inetrc']
  extraVolumes:
    - name: ipv6-cfg
      emptyDir: {}
  extraVolumeMounts:
    - name: ipv6-cfg
      mountPath: /ipv6
  extraEnvVars:
    - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS
      value: "-kernel inetrc '/ipv6/erl_inetrc' -proto_dist inet6_tcp"
    - name: RABBITMQ_CTL_ERL_ARGS
      value: "-proto_dist inet6_tcp"
  extraConfiguration: |-
    management.ssl.ip         = ::
    management.ssl.port       = 15671
    management.ssl.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem
    management.ssl.certfile   = /opt/bitnami/rabbitmq/certs/server_certificate.pem
    management.ssl.keyfile    = /opt/bitnami/rabbitmq/certs/server_key.pem

rest_service:
  bind_host: "[::]"

api_service:
  bind_host: "[::]"

rest_api_server:
  bind_host: "::"


Scaling

There are several components you might want to scale up:

For an example of a scaled-up deployment, see the scaled-up-values.yaml example values file.

# this is an example values file showing how to scale up several services

# rabbitmq and mgmtworker can be scaled by just increasing the pod count
rabbitmq:
  replicaCount: 2

mgmtworker:
  replicas: 2

# with the bitnami postgresql chart, using architecture=replication will
# start a primary and a read replica
postgresql:
  architecture: replication
  readReplicas:
    replicaCount: 1

# all the db-using services need to be instructed to connect to the primary
db:
  host: "postgresql-primary"


Configuration options of Wind River Conductor-services values.yaml

Key Type Default Description
api_service.affinity object {}
api_service.bind_host string "[::]" Binds the service to the container’s host address; change to 0.0.0.0 for an IPv4-only Kubernetes cluster if the default fails. "[::]" will bind to IPv6 if available (in the case of an IPv6 cluster), or it will fall back to binding to IPv4. IMPORTANT: The IPv6-to-IPv4 fallback might not work on all container base systems, or on misconfigured Kubernetes clusters and hosts.
api_service.containerSecurityContext.allowPrivilegeEscalation bool false
api_service.containerSecurityContext.capabilities.drop[0] string "ALL"
api_service.containerSecurityContext.enabled bool true
api_service.containerSecurityContext.runAsNonRoot bool true
api_service.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
api_service.image string "docker.io/cloudifyplatform/cloudify-manager-apiservice:7.0.3"
api_service.imagePullPolicy string "IfNotPresent"
api_service.nodeSelector object {}
api_service.port int 8101
api_service.probes.liveness.enabled bool true
api_service.probes.liveness.failureThreshold int 3
api_service.probes.liveness.initialDelaySeconds int 20
api_service.probes.liveness.periodSeconds int 20
api_service.probes.liveness.successThreshold int 1
api_service.probes.liveness.timeoutSeconds int 10
api_service.replicas int 1
api_service.tolerations list []
certs.ca_cert string ""
certs.ca_key string ""
certs.external_cert string ""
certs.external_key string ""
certs.internal_cert string ""
certs.internal_key string ""
certs.rabbitmq_cert string ""
certs.rabbitmq_key string ""
composer_backend.affinity object {}
composer_backend.containerSecurityContext.allowPrivilegeEscalation bool false
composer_backend.containerSecurityContext.capabilities.drop[0] string "ALL"
composer_backend.containerSecurityContext.enabled bool true
composer_backend.containerSecurityContext.runAsNonRoot bool true
composer_backend.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
composer_backend.image string "docker.io/cloudifyplatform/cloudify-manager-composer-backend:7.0.3"
composer_backend.imagePullPolicy string "IfNotPresent"
composer_backend.nodeSelector object {}
composer_backend.port int 3000
composer_backend.probes.liveness.enabled bool false
composer_backend.probes.liveness.failureThreshold int 3
composer_backend.probes.liveness.initialDelaySeconds int 20
composer_backend.probes.liveness.periodSeconds int 20
composer_backend.probes.liveness.successThreshold int 1
composer_backend.probes.liveness.timeoutSeconds int 10
composer_backend.replicas int 1
composer_backend.tolerations list []
composer_frontend.affinity object {}
composer_frontend.containerSecurityContext.allowPrivilegeEscalation bool false
composer_frontend.containerSecurityContext.capabilities.drop[0] string "ALL"
composer_frontend.containerSecurityContext.enabled bool true
composer_frontend.containerSecurityContext.readOnlyRootFilesystem bool true
composer_frontend.containerSecurityContext.runAsNonRoot bool true
composer_frontend.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
composer_frontend.image string "docker.io/cloudifyplatform/cloudify-manager-composer-frontend:7.0.3"
composer_frontend.imagePullPolicy string "IfNotPresent"
composer_frontend.nodeSelector object {}
composer_frontend.port int 8188
composer_frontend.probes.liveness.enabled bool true
composer_frontend.probes.liveness.failureThreshold int 3
composer_frontend.probes.liveness.initialDelaySeconds int 20
composer_frontend.probes.liveness.periodSeconds int 20
composer_frontend.probes.liveness.successThreshold int 1
composer_frontend.probes.liveness.timeoutSeconds int 10
composer_frontend.replicas int 1
composer_frontend.tolerations list []
db.dbName string "cloudify_db"
db.host string "postgresql"
db.k8sSecret.key string "password"
db.k8sSecret.name string "cloudify-db-creds"
db.password string "cloudify"
db.user string "cloudify"
execution_scheduler.affinity object {}
execution_scheduler.containerSecurityContext.allowPrivilegeEscalation bool false
execution_scheduler.containerSecurityContext.capabilities.drop[0] string "ALL"
execution_scheduler.containerSecurityContext.enabled bool true
execution_scheduler.containerSecurityContext.runAsNonRoot bool true
execution_scheduler.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
execution_scheduler.image string "docker.io/cloudifyplatform/cloudify-manager-execution-scheduler:7.0.3"
execution_scheduler.imagePullPolicy string "IfNotPresent"
execution_scheduler.nodeSelector object {}
execution_scheduler.replicas int 1
execution_scheduler.tolerations list []
fullnameOverride string ""
imagePullSecrets list []
ingress.annotations.“nginx.ingress.kubernetes.io/proxy-body-size” string "100m"
ingress.enabled bool true
ingress.host string nil
ingress.ingressClassName string "nginx"
ingress.secretName string "cfy-secret-name"
ingress.tls bool false
kube-state-metrics object object Parameters group for bitnami/kube-state-metrics helm chart. Details: https://github.com/bitnami/charts/tree/main/bitnami/kube-state-metrics/README.md
mgmtworker.access.local_cluster bool true
mgmtworker.affinity object {}
mgmtworker.containerSecurityContext.allowPrivilegeEscalation bool false
mgmtworker.containerSecurityContext.capabilities.drop[0] string "ALL"
mgmtworker.containerSecurityContext.enabled bool true
mgmtworker.containerSecurityContext.runAsNonRoot bool true
mgmtworker.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
mgmtworker.image string "docker.io/cloudifyplatform/cloudify-manager-mgmtworker:7.0.3"
mgmtworker.imagePullPolicy string "IfNotPresent"
mgmtworker.nodeSelector object {}
mgmtworker.replicas int 1
mgmtworker.resources.pvc.class string ""
mgmtworker.resources.pvc.modes[0] string "ReadWriteOnce"
mgmtworker.resources.pvc.name string "mgmtworker-pvc"
mgmtworker.resources.pvc.size string "10Gi"
mgmtworker.serviceAccount string "mgmtworker-serviceaccount"
mgmtworker.tolerations list []
nameOverride string ""
nginx.affinity object {}
nginx.containerSecurityContext.allowPrivilegeEscalation bool false
nginx.containerSecurityContext.capabilities.drop[0] string "ALL"
nginx.containerSecurityContext.enabled bool true
nginx.containerSecurityContext.runAsNonRoot bool true
nginx.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
nginx.image string "nginxinc/nginx-unprivileged"
nginx.imagePullPolicy string "IfNotPresent"
nginx.nodeSelector object {}
nginx.probes.liveness.enabled bool true
nginx.probes.liveness.failureThreshold int 3
nginx.probes.liveness.initialDelaySeconds int 20
nginx.probes.liveness.periodSeconds int 20
nginx.probes.liveness.successThreshold int 1
nginx.probes.liveness.timeoutSeconds int 10
nginx.rate_limit.burst int 30
nginx.rate_limit.delay int 20
nginx.rate_limit.enabled bool true
nginx.rate_limit.memory string "200m"
nginx.rate_limit.rate string "100r/s"
nginx.replicas int 1
nginx.tolerations list []
oas_service_account.serviceAccount string "conductor-operator-sa"
postgresql.auth.database string "cloudify_db"
postgresql.auth.password string "cloudify"
postgresql.auth.username string "cloudify"
postgresql.containerPorts.postgresql int 5432
postgresql.enableNetworkPolicy bool true
postgresql.enabled bool true
postgresql.fullnameOverride string "postgresql"
postgresql.image.pullPolicy string "IfNotPresent"
postgresql.image.tag string "15.3.0-debian-11-r17"
postgresql.metrics.containerSecurityContext.allowPrivilegeEscalation bool false
postgresql.metrics.containerSecurityContext.capabilities.drop[0] string "ALL"
postgresql.metrics.containerSecurityContext.enabled bool true
postgresql.metrics.containerSecurityContext.runAsNonRoot bool true
postgresql.metrics.containerSecurityContext.runAsUser int 1001
postgresql.metrics.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
postgresql.metrics.enabled bool true
postgresql.podLabels object {}
postgresql.primary.affinity object {}
postgresql.primary.containerSecurityContext.allowPrivilegeEscalation bool false
postgresql.primary.containerSecurityContext.capabilities.drop[0] string "ALL"
postgresql.primary.containerSecurityContext.enabled bool true
postgresql.primary.containerSecurityContext.runAsNonRoot bool true
postgresql.primary.containerSecurityContext.runAsUser int 1001
postgresql.primary.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
postgresql.primary.nodeSelector object {}
postgresql.primary.persistence.accessModes[0] string "ReadWriteOnce"
postgresql.primary.persistence.size string "8Gi"
postgresql.primary.persistence.storageClass string ""
postgresql.primary.resources.limits.cpu int 2
postgresql.primary.resources.limits.memory string "2Gi"
postgresql.primary.resources.requests.cpu float 0.5
postgresql.primary.resources.requests.memory string "256Mi"
postgresql.primary.tolerations list []
postgresql.readReplicas.affinity object {}
postgresql.readReplicas.containerSecurityContext.allowPrivilegeEscalation bool false
postgresql.readReplicas.containerSecurityContext.capabilities.drop[0] string "ALL"
postgresql.readReplicas.containerSecurityContext.enabled bool true
postgresql.readReplicas.containerSecurityContext.runAsNonRoot bool true
postgresql.readReplicas.containerSecurityContext.runAsUser int 1001
postgresql.readReplicas.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
postgresql.readReplicas.nodeSelector object {}
postgresql.readReplicas.persistence.accessModes[0] string "ReadWriteOnce"
postgresql.readReplicas.persistence.size string "8Gi"
postgresql.readReplicas.persistence.storageClass string ""
postgresql.readReplicas.tolerations list []
prometheus object object Parameters group for bitnami/prometheus helm chart. Details: https://github.com/bitnami/charts/blob/main/bitnami/prometheus/README.md
rabbitmq.advancedConfiguration string "[\n {rabbit, [\n {consumer_timeout, undefined}\n ]}\n]."
rabbitmq.affinity object {}
rabbitmq.auth.erlangCookie string "cloudify-erlang-cookie"
rabbitmq.auth.password string "c10udify"
rabbitmq.auth.tls.enabled bool true
rabbitmq.auth.tls.existingSecret string "rabbitmq-ssl-certs"
rabbitmq.auth.tls.failIfNoPeerCert bool false
rabbitmq.auth.username string "cloudify"
rabbitmq.containerSecurityContext.allowPrivilegeEscalation bool false
rabbitmq.containerSecurityContext.capabilities.drop[0] string "ALL"
rabbitmq.containerSecurityContext.enabled bool true
rabbitmq.containerSecurityContext.runAsNonRoot bool true
rabbitmq.containerSecurityContext.runAsUser int 1001
rabbitmq.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
rabbitmq.enableNetworkPolicy bool true
rabbitmq.enabled bool true
rabbitmq.extraConfiguration string "management.ssl.port = 15671\nmanagement.ssl.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem\nmanagement.ssl.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem\nmanagement.ssl.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem"
rabbitmq.extraSecrets.rabbitmq-load-definition.“load_definition.json” string "{\n \"vhosts\": [\n {\n \"name\": \"/\"\n }\n ],\n \"users\": [\n {\n \"hashing_algorithm\": \"rabbit_password_hashing_sha256\",\n \"name\": \"{{ .Values.auth.username }}\",\n \"password\": \"{{ .Values.auth.password }}\",\n \"tags\": \"administrator\"\n }\n ],\n \"permissions\": [\n {\n \"user\": \"{{ .Values.auth.username }}\",\n \"vhost\": \"/\",\n \"configure\": \".*\",\n \"write\": \".*\",\n \"read\": \".*\"\n }\n ],\n \"policies\": [\n {\n \"name\": \"logs_queue_message_policy\",\n \"vhost\": \"/\",\n \"pattern\": \"^cloudify-log$\",\n \"priority\": 100,\n \"apply-to\": \"queues\",\n \"definition\": {\n \"message-ttl\": 1200000,\n \"max-length\": 1000000,\n \"ha-mode\": \"all\",\n \"ha-sync-mode\": \"automatic\",\n \"ha-sync-batch-size\": 50\n }\n },\n {\n \"name\": \"events_queue_message_policy\",\n \"vhost\": \"/\",\n \"pattern\": \"^cloudify-events$\",\n \"priority\": 100,\n \"apply-to\": \"queues\",\n \"definition\": {\n \"message-ttl\": 1200000,\n \"max-length\": 1000000,\n \"ha-mode\": \"all\",\n \"ha-sync-mode\": \"automatic\",\n \"ha-sync-batch-size\": 50\n }\n },\n {\n \"name\": \"default_policy\",\n \"vhost\": \"/\",\n \"pattern\": \"^\",\n \"priority\": 1,\n \"apply-to\": \"queues\",\n \"definition\": {\n \"ha-mode\": \"all\",\n \"ha-sync-mode\": \"automatic\",\n \"ha-sync-batch-size\": 50\n }\n }\n ],\n \"queues\": [\n {\n \"arguments\": {},\n \"auto_delete\": false,\n \"durable\": true,\n \"name\": \"cloudify.management_operation\",\n \"type\": \"classic\",\n \"vhost\": \"/\"\n },\n {\n \"arguments\": {},\n \"auto_delete\": false,\n \"durable\": true,\n \"name\": \"cloudify.management_workflow\",\n \"type\": \"classic\",\n \"vhost\": \"/\"\n }\n ],\n \"bindings\": [\n {\n \"arguments\": {},\n \"destination\": \"cloudify.management_operation\",\n \"destination_type\": \"queue\",\n \"routing_key\": \"operation\",\n \"source\": \"cloudify.management\",\n \"vhost\": \"/\"\n 
},\n {\n \"arguments\": {},\n \"destination\": \"cloudify.management_workflow\",\n \"destination_type\": \"queue\",\n \"routing_key\": \"workflow\",\n \"source\": \"cloudify.management\",\n \"vhost\": \"/\"\n }\n ],\n \"exchanges\": [\n {\n \"arguments\": {},\n \"auto_delete\": false,\n \"durable\": true,\n \"name\": \"cloudify.management\",\n \"type\": \"direct\",\n \"vhost\": \"/\"\n }\n ]\n}\n"
rabbitmq.fullnameOverride string "rabbitmq"
rabbitmq.image.pullPolicy string "IfNotPresent"
rabbitmq.image.tag string "3.12.2-debian-11-r8"
rabbitmq.loadDefinition.enabled bool true
rabbitmq.loadDefinition.existingSecret string "rabbitmq-load-definition"
rabbitmq.metrics.enabled bool true
rabbitmq.nodeSelector object {}
rabbitmq.persistence.accessModes[0] string "ReadWriteOnce"
rabbitmq.persistence.size string "8Gi"
rabbitmq.persistence.storageClass string ""
rabbitmq.plugins string "rabbitmq_management rabbitmq_prometheus rabbitmq_tracing rabbitmq_peer_discovery_k8s"
rabbitmq.podLabels object {}
rabbitmq.resources.limits.cpu int 4
rabbitmq.resources.limits.memory string "1Gi"
rabbitmq.resources.requests.cpu float 0.5
rabbitmq.resources.requests.memory string "512Mi"
rabbitmq.service.extraPorts[0].name string "manager-ssl"
rabbitmq.service.extraPorts[0].port int 15671
rabbitmq.service.extraPorts[0].targetPort int 15671
rabbitmq.service.ports.metrics int 15692
rabbitmq.tolerations list []
resources.packages.agents."cloudify-windows-agent.exe" string "https://cloudify-release-eu.s3.amazonaws.com/cloudify/7.0.0/ga-release/cloudify-windows-agent_7.0.0-ga.exe"
resources.packages.agents."manylinux-aarch64-agent.tar.gz" string "https://cloudify-release-eu.s3.amazonaws.com/cloudify/7.0.0/ga-release/manylinux-aarch64-agent_7.0.0-ga.tar.gz"
resources.packages.agents."manylinux-x86_64-agent.tar.gz" string "https://cloudify-release-eu.s3.amazonaws.com/cloudify/7.0.0/ga-release/manylinux-x86_64-agent_7.0.0-ga.tar.gz"
rest_api_server.affinity object {}
rest_api_server.bind_host string "::" Binds the service to the container's host address; change to 0.0.0.0 for an IPv4-only Kubernetes cluster if the default fails. "::" binds to IPv6 if available (in the case of an IPv6 cluster), or falls back to binding to IPv4. IMPORTANT: IPv4-mapped IPv6 addresses might not work on all container base systems or on misconfigured Kubernetes clusters and hosts.
rest_api_server.containerSecurityContext.allowPrivilegeEscalation bool false
rest_api_server.containerSecurityContext.capabilities.drop[0] string "ALL"
rest_api_server.containerSecurityContext.enabled bool true
rest_api_server.containerSecurityContext.runAsNonRoot bool true
rest_api_server.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
rest_api_server.image string "device.registry.nl.devstar.cloud/conductor/rest-api-app:latest"
rest_api_server.imagePullPolicy string "IfNotPresent"
rest_api_server.nodeSelector object {}
rest_api_server.port int 8000
rest_api_server.probes.liveness.enabled bool true
rest_api_server.probes.liveness.failureThreshold int 3
rest_api_server.probes.liveness.initialDelaySeconds int 20
rest_api_server.probes.liveness.periodSeconds int 20
rest_api_server.probes.liveness.successThreshold int 1
rest_api_server.probes.liveness.timeoutSeconds int 10
rest_api_server.replicas int 1
rest_api_server.resources.limits.memory string "512Mi"
rest_api_server.resources.requests.memory string "256Mi"
rest_api_server.service.name string "rest-api-service"
rest_api_server.service.type string "ClusterIP"
rest_api_server.tolerations list []
rest_service.affinity object {}
rest_service.bind_host string "[::]" Binds the service to the container's host address; change to 0.0.0.0 for an IPv4-only Kubernetes cluster if the default fails. "[::]" binds to IPv6 if available (in the case of an IPv6 cluster), or falls back to binding to IPv4. IMPORTANT: IPv4-mapped IPv6 addresses might not work on all container base systems or on misconfigured Kubernetes clusters and hosts.
rest_service.config.manager.file_server_type string "s3"
rest_service.config.manager.hostname string "cloudify-manager"
rest_service.config.manager.private_ip string "localhost"
rest_service.config.manager.prometheus_url string "http://prometheus-server:9090"
rest_service.config.manager.public_ip string "localhost"
rest_service.config.manager.s3_resources_bucket string "resources"
rest_service.config.manager.s3_server_url string "" s3_server_url is the address of the S3 endpoint. Ignored and auto-generated when using the built-in SeaweedFS.
rest_service.config.manager.security.admin_password string "admin"
rest_service.config.manager.security.admin_username string "admin"
rest_service.configPath string "/tmp/config.yaml"
rest_service.containerSecurityContext.allowPrivilegeEscalation bool false
rest_service.containerSecurityContext.capabilities.drop[0] string "ALL"
rest_service.containerSecurityContext.enabled bool true
rest_service.containerSecurityContext.runAsGroup int 1000
rest_service.containerSecurityContext.runAsNonRoot bool true
rest_service.containerSecurityContext.runAsUser int 1000
rest_service.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
rest_service.curl_image string "alpine/curl"
rest_service.image string "docker.io/cloudifyplatform/cloudify-manager-restservice:7.0.3"
rest_service.imagePullPolicy string "IfNotPresent"
rest_service.nodeSelector object {}
rest_service.port int 8100
rest_service.probes.liveness.enabled bool false
rest_service.probes.liveness.failureThreshold int 3
rest_service.probes.liveness.initialDelaySeconds int 20
rest_service.probes.liveness.periodSeconds int 20
rest_service.probes.liveness.successThreshold int 1
rest_service.probes.liveness.timeoutSeconds int 10
rest_service.replicas int 1
rest_service.s3.credentials_secret_name string "seaweedfs-s3-secret"
rest_service.s3.session_token_secret_name string ""
rest_service.serviceAccount string "restservice-sa"
rest_service.tolerations list []
seaweedfs object object Parameter group for the SeaweedFS helm chart. Details: https://github.com/seaweedfs/seaweedfs/tree/master/k8s/charts/seaweedfs
seaweedfs.clientImage string object Parameter group for awscli containers (used as init containers)
service.port int 8080
service.type string "ClusterIP"
serviceAccount.annotations object {}
serviceAccount.create bool true
serviceAccount.name string ""
stage_backend.affinity object {}
stage_backend.containerSecurityContext.allowPrivilegeEscalation bool false
stage_backend.containerSecurityContext.capabilities.drop[0] string "ALL"
stage_backend.containerSecurityContext.enabled bool true
stage_backend.containerSecurityContext.runAsNonRoot bool true
stage_backend.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
stage_backend.image string "docker.io/cloudifyplatform/cloudify-manager-stage-backend:7.0.3"
stage_backend.imagePullPolicy string "IfNotPresent"
stage_backend.maps.accessToken string ""
stage_backend.maps.attribution string ""
stage_backend.maps.tilesUrlTemplate string ""
stage_backend.nodeSelector object {}
stage_backend.port int 8088
stage_backend.probes.liveness.enabled bool true
stage_backend.probes.liveness.failureThreshold int 3
stage_backend.probes.liveness.initialDelaySeconds int 20
stage_backend.probes.liveness.periodSeconds int 20
stage_backend.probes.liveness.successThreshold int 1
stage_backend.probes.liveness.timeoutSeconds int 10
stage_backend.replicas int 1
stage_backend.tolerations list []
stage_frontend.affinity object {}
stage_frontend.containerSecurityContext.allowPrivilegeEscalation bool false
stage_frontend.containerSecurityContext.capabilities.drop[0] string "ALL"
stage_frontend.containerSecurityContext.enabled bool true
stage_frontend.containerSecurityContext.readOnlyRootFilesystem bool true
stage_frontend.containerSecurityContext.runAsNonRoot bool true
stage_frontend.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
stage_frontend.image string "docker.io/cloudifyplatform/cloudify-manager-stage-frontend:7.0.3"
stage_frontend.imagePullPolicy string "IfNotPresent"
stage_frontend.nodeSelector object {}
stage_frontend.port int 8188
stage_frontend.probes.liveness.enabled bool true
stage_frontend.probes.liveness.failureThreshold int 3
stage_frontend.probes.liveness.initialDelaySeconds int 20
stage_frontend.probes.liveness.periodSeconds int 20
stage_frontend.probes.liveness.successThreshold int 1
stage_frontend.probes.liveness.timeoutSeconds int 10
stage_frontend.replicas int 1
stage_frontend.tolerations list []
system_inventory_manager.affinity object {}
system_inventory_manager.containerSecurityContext.allowPrivilegeEscalation bool false
system_inventory_manager.containerSecurityContext.capabilities.drop[0] string "ALL"
system_inventory_manager.containerSecurityContext.enabled bool true
system_inventory_manager.containerSecurityContext.runAsNonRoot bool true
system_inventory_manager.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
system_inventory_manager.image string "device.registry.nl.devstar.cloud/conductor/system-inventory-manager:latest"
system_inventory_manager.imagePullPolicy string "IfNotPresent"
system_inventory_manager.nodeSelector object {}
system_inventory_manager.port int 8080
system_inventory_manager.replicas int 1
system_inventory_manager.resources.limits.memory string "512Mi"
system_inventory_manager.resources.requests.memory string "256Mi"
system_inventory_manager.tolerations list []
test_connection.image string "busybox"
upgrade_group_manager.affinity object {}
upgrade_group_manager.containerSecurityContext.allowPrivilegeEscalation bool false
upgrade_group_manager.containerSecurityContext.capabilities.drop[0] string "ALL"
upgrade_group_manager.containerSecurityContext.enabled bool true
upgrade_group_manager.containerSecurityContext.runAsNonRoot bool true
upgrade_group_manager.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
upgrade_group_manager.image string "device.registry.nl.devstar.cloud/conductor/upgrade-group-manager:latest"
upgrade_group_manager.imagePullPolicy string "IfNotPresent"
upgrade_group_manager.nodeSelector object {}
upgrade_group_manager.port int 8080
upgrade_group_manager.replicas int 1
upgrade_group_manager.resources.limits.memory string "512Mi"
upgrade_group_manager.resources.requests.memory string "256Mi"
upgrade_group_manager.tolerations list []
wrc_endpoint_secret.data.password string "admin"
wrc_endpoint_secret.data.username string "admin"
wrc_endpoint_secret.stringData.tenant string "default_tenant"
wrc_secret.affinity object {}
wrc_secret.containerSecurityContext.allowPrivilegeEscalation bool false
wrc_secret.containerSecurityContext.capabilities.drop[0] string "ALL"
wrc_secret.containerSecurityContext.enabled bool true
wrc_secret.containerSecurityContext.runAsNonRoot bool true
wrc_secret.containerSecurityContext.seccompProfile.type string "RuntimeDefault"
wrc_secret.image string "device.registry.nl.devstar.cloud/conductor/wrc-secret-operator:latest"
wrc_secret.imagePullPolicy string "IfNotPresent"
wrc_secret.nodeSelector object {}
wrc_secret.port int 8080
wrc_secret.replicas int 1
wrc_secret.resources.limits.memory string "512Mi"
wrc_secret.resources.requests.memory string "256Mi"
wrc_secret.tolerations list []