diff --git a/README.md b/README.md
index 4f1524ce..9d956009 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,33 @@
 # Freeleaps Ops
 > All GitOps relates manifests or scripts.
+
+## Introduction
+
+This repo stores all manifest files for Freeleaps products, infrastructure, and the cluster.
+
+The project layout follows this pattern:
+
+![layout](assets/layout.png)
+
+## How to use?
+
+**Please do not edit files in this repo unless you know what you are doing.**
+
+Files in this repo are managed by CI/CD services (Jenkins, Argo CD, etc.).
+
+## About the `cluster` folder
+
+**N.B. THIS FOLDER IS FOR CLUSTER ADMINISTRATORS ONLY. YOU MUST KNOW WHAT YOU ARE DOING BEFORE YOU MODIFY IT.**
+
+> If you want to work with the `cluster` folder, you must set up a local maintenance environment first.
+> Make sure you have a local `Python 3` environment with `Ansible` installed.
+> Also make sure to run `git submodule update --init --recursive` to clone the `kubespray` repo before you start working.
+
+The `cluster` folder stores the infrastructure components, application manifests, and [`kubespray`](https://kubespray.io/) configuration files for the entire cluster.
+
+`cluster/manifests//` stores the manifests of components and applications.
+
+`cluster/inventory.ini` stores the current state of the cluster; if you want to add new nodes to the cluster, edit this file.
+
+`cluster/group_vars` describes the current configuration state of the cluster.
diff --git a/assets/layout.png b/assets/layout.png
new file mode 100644
index 00000000..39b1800d
Binary files /dev/null and b/assets/layout.png differ
diff --git a/cluster/group_vars/all/all.yml b/cluster/group_vars/all/all.yml
new file mode 100644
index 00000000..efabd051
--- /dev/null
+++ b/cluster/group_vars/all/all.yml
@@ -0,0 +1,137 @@
+---
+## Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+# access_ip: 1.1.1.1
+
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+# loadbalancer_apiserver:
+#   address: 1.2.3.4
+#   port: 1234
+
+## Internal loadbalancers for apiservers
+# loadbalancer_apiserver_localhost: true
+# valid options are "nginx" or "haproxy"
+# loadbalancer_apiserver_type: nginx  # valid values "nginx" or "haproxy"
+
+## Local loadbalancer should use this port,
+## and it must be set to port 6443
+loadbalancer_apiserver_port: 6443
+
+## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
+loadbalancer_apiserver_healthcheck_port: 8081
+
+### OTHER OPTIONAL VARIABLES
+
+## By default, Kubespray collects nameservers on the host and adds them to nameserverentries.
+## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it still uses the nameservers to make sure the cluster installs safely in the dns_early stage.
+## Use this option with caution; you may need to define your own dns servers. Otherwise, outbound queries such as www.google.com may fail.
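+## Editorial example (illustrative values, not upstream defaults): if you do
+## disable host nameservers below, pair that with explicit upstream servers so
+## outbound lookups keep working, e.g.:
+#   disable_host_nameservers: true
+#   upstream_dns_servers: [8.8.8.8, 8.8.4.4]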
+# disable_host_nameservers: false + +## Upstream dns servers +# upstream_dns_servers: +# - 8.8.8.8 +# - 8.8.4.4 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values only 'external' after K8s v1.31. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies and custom CA for https_proxy if needed +# http_proxy: "" +# https_proxy: "" +# https_proxy_cert_file: "" + +## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. +# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes +## in the no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. +deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. 
+# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: true +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false + +## If enabled it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases. +allow_unsupported_distribution_setup: false diff --git a/cluster/group_vars/all/containerd.yml b/cluster/group_vars/all/containerd.yml new file mode 100644 index 00000000..906437df --- /dev/null +++ b/cluster/group_vars/all/containerd.yml @@ -0,0 +1,59 @@ +--- +# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options + +# containerd_storage_dir: "/var/lib/containerd" +# containerd_state_dir: "/run/containerd" +# containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# Containerd debug socket location: unix or tcp format +# containerd_debug_address: "" + +# Containerd log level +# containerd_debug_level: "info" + +# Containerd logs format, supported values: text, json +# containerd_debug_format: "" + +# Containerd debug socket UID +# containerd_debug_uid: 0 + +# Containerd debug socket GID +# containerd_debug_gid: 0 + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +# Registries defined within containerd. +# containerd_registries_mirrors: +# - prefix: docker.io +# mirrors: +# - host: https://registry-1.docker.io +# capabilities: ["pull", "resolve"] +# skip_verify: false + +# containerd_max_container_log_line_size: 16384 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/cluster/group_vars/all/etcd.yml b/cluster/group_vars/all/etcd.yml new file mode 100644 index 00000000..39600c35 --- /dev/null +++ b/cluster/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. 
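+## Editorial sketch: uncommenting the line below pins the etcd hosts to
+## containerd explicitly, independent of the cluster-wide container_manager
+## set in k8s_cluster/k8s-cluster.yml.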
+# container_manager: containerd
+
+## Settings for etcd deployment type
+# Set this to docker if you are using container_manager: docker
+etcd_deployment_type: host
diff --git a/cluster/group_vars/etcd.yml b/cluster/group_vars/etcd.yml
new file mode 100644
index 00000000..075034e1
--- /dev/null
+++ b/cluster/group_vars/etcd.yml
@@ -0,0 +1,38 @@
+---
+## Etcd auto compaction retention for the mvcc key value store, in hours
+# etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics; specify 'extensive' to include histogram metrics.
+etcd_metrics: basic
+
+## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+## This value is only relevant when deploying etcd with `etcd_deployment_type: docker`
+etcd_memory_limit: "0"
+
+## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
+## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
+## etcd documentation for more information.
+# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it.
+etcd_quota_backend_bytes: "4294967296" # Doubled from the 2G default to 4G by Sun Zhenyu
+
+# Maximum client request size in bytes the server will accept.
+# etcd is designed to handle small key value pairs typical for metadata.
+# Larger requests will work, but may increase the latency of other requests.
+etcd_max_request_bytes: "1572864"
+
+### ETCD: disable peer client cert authentication.
+# This affects the ETCD_PEER_CLIENT_CERT_AUTH variable
+# etcd_peer_client_auth: true
+
+## Enable distributed tracing
+## To enable this experimental feature, set etcd_experimental_enable_distributed_tracing: true, along with
+## etcd_experimental_distributed_tracing_sample_rate to choose how many samples to collect per million spans;
+## the default sampling rate is 0. https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing
+# etcd_experimental_enable_distributed_tracing: false
+# etcd_experimental_distributed_tracing_sample_rate: 100
+# etcd_experimental_distributed_tracing_address: "localhost:4317"
+# etcd_experimental_distributed_tracing_service_name: etcd
+
+## The interval for etcd watch progress notify events
+# etcd_experimental_watch_progress_notify_interval: 5s
diff --git a/cluster/group_vars/k8s_cluster/addons.yml b/cluster/group_vars/k8s_cluster/addons.yml
new file mode 100644
index 00000000..437b2682
--- /dev/null
+++ b/cluster/group_vars/k8s_cluster/addons.yml
@@ -0,0 +1,290 @@
+---
+# Kubernetes dashboard
+# RBAC required. See docs/getting-started.md for access details.
+# dashboard_enabled: false + +# Helm deployment +helm_enabled: true + +# Registry deployment +registry_enabled: false +# registry_namespace: kube-system +# registry_storage_class: "" +# registry_disk_size: "10Gi" + +# Metrics Server deployment +metrics_server_enabled: false +# metrics_server_container_port: 10250 +# metrics_server_kubelet_insecure_tls: true +# metrics_server_metric_resolution: 15s +# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# COMMENTS: Use OpenEBS replace Local Path Provisioner @ Sun Zhenyu +# Rancher Local Path Provisioner +local_path_provisioner_enabled: false +# local_path_provisioner_namespace: "local-path-storage" +# local_path_provisioner_storage_class: "local-path" +# local_path_provisioner_reclaim_policy: Delete +# local_path_provisioner_claim_root: /opt/local-path-provisioner/ +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.24" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# COMMENTS: Use OpenEBS replace Local Volume Provisioner @ Sun Zhenyu +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller +# Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# 
rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Gateway API CRDs +gateway_api_enabled: false +# gateway_api_experimental_channel: false + +# COMMENTS: NGINX Ingress will manually installed @ Sun Zhenyu +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +# ingress_nginx_service_type: LoadBalancer +# ingress_nginx_service_annotations: +# example.io/loadbalancerIPs: 1.2.3.4 +# ingress_nginx_service_nodeport_http: 30080 +# ingress_nginx_service_nodeport_https: 30081 +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx +# ingress_nginx_without_class: true +# ingress_nginx_default: false + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# COMMENTS: Cert Manager will manually installed @ Sun Zhenyu +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# cert_manager_dns_policy: "ClusterFirst" +# cert_manager_dns_config: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +# cert_manager_controller_extra_args: +# - "--dns01-recursive-nameservers-only=true" +# - "--dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53" + +# COMMENTS: Currently we using Azure LB for cluster @ Sun Zhenyu +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +metallb_namespace: "metallb-system" +# metallb_version: v0.13.9 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_config: +# speaker: +# nodeselector: +# kubernetes.io/os: "linux" +# tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# controller: +# nodeselector: +# kubernetes.io/os: "linux" +# tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# address_pools: +# primary: +# ip_range: +# - 10.5.0.0/16 +# auto_assign: true +# pool1: +# ip_range: +# - 10.6.0.0/16 +# 
auto_assign: true +# pool2: +# ip_range: +# - 10.10.0.0/16 +# auto_assign: true +# layer2: +# - primary +# layer3: +# defaults: +# peer_port: 179 +# hold_time: 120s +# communities: +# vpn-only: "1234:1" +# NO_ADVERTISE: "65535:65282" +# metallb_peers: +# peer1: +# peer_address: 10.6.0.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# communities: +# - vpn-only +# address_pool: +# - pool1 +# peer2: +# peer_address: 10.10.0.1 +# peer_asn: 64513 +# my_asn: 4200000000 +# communities: +# - NO_ADVERTISE +# address_pool: +# - pool2 + +# COMMENTS: Argo CD will manually installed @ Sun Zhenyu +argocd_enabled: false +# argocd_version: v2.11.0 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated and stored in `argocd-initial-admin-secret` in the argocd namespace defined above. +# Using the argocd CLI the generated password can be automatically be fetched from the current kubectl context with the command: +# argocd admin initial-password -n argocd +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# COMMENTS: Krew is useful to find and install kubectl plugins, so we enable it @ Sun Zhenyu +# The plugin manager for kubectl +krew_enabled: true +krew_root_dir: "/usr/local/krew" + +# Kube VIP +kube_vip_enabled: false +# kube_vip_arp_enabled: true +# kube_vip_controlplane_enabled: true +# kube_vip_address: 192.168.56.120 +# loadbalancer_apiserver: +# address: "{{ kube_vip_address }}" +# port: 6443 +# kube_vip_interface: eth0 +# kube_vip_services_enabled: false +# kube_vip_dns_mode: first +# kube_vip_cp_detect: false +# kube_vip_leasename: plndr-cp-lock +# kube_vip_enable_node_labeling: false + +# Node Feature Discovery +node_feature_discovery_enabled: false +# node_feature_discovery_gc_sa_name: node-feature-discovery +# node_feature_discovery_gc_sa_create: false +# node_feature_discovery_worker_sa_name: node-feature-discovery +# node_feature_discovery_worker_sa_create: false +# node_feature_discovery_master_config: +# extraLabelNs: ["nvidia.com"] diff --git a/cluster/group_vars/k8s_cluster/k8s-cluster.yml b/cluster/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 00000000..96652983 --- /dev/null +++ b/cluster/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,387 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.31.4 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. +kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... 
+kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook or kube_apiserver_authorization_config_authorizers must configure a type: Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +kube_pods_subnet: 10.233.64.0/18 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. 
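+# Editorial sanity check on the sizing above: the /112 pod subnet combined with
+# the /120 per-node prefix below yields 2^(120-112) = 256 node blocks, each with
+# 2^(128-120) = 256 addresses, i.e. roughly 254 usable pod IPs per node.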
+kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. +# Can be ipvs, iptables +kube_proxy_mode: ipvs + +# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface +# must be set to true for MetalLB, kube-vip(ARP enabled) to work +kube_proxy_strict_arp: false + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} + +# If non-empty, will use this string as identification instead of the actual hostname +# kube_override_hostname: {{ inventory_hostname }} + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false + +# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/ +# kubelet_shutdown_grace_period had to be greater than kubelet_shutdown_grace_period_critical_pods to allow +# non-critical podsa to also terminate gracefully +# kubelet_shutdown_grace_period: 60s +# kubelet_shutdown_grace_period_critical_pods: 20s + +# DNS configuration. +# Kubernetes cluster name, also will be used as DNS domain +cluster_name: freeleaps.cluster +# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods +ndots: 2 +# dns_timeout: 2 +# dns_attempts: 2 +# Custom search domains to be added in addition to the default cluster search domains +# searchdomains: +# - svc.{{ cluster_name }} +# - default.svc.{{ cluster_name }} +# Remove default cluster search domains (``default.svc.{{ dns_domain }}, svc.{{ dns_domain }}``). 
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential +# Apply extra options to coredns kubernetes plugin +# coredns_kubernetes_extra_opts: +# - 'fallthrough example.local' +# Forward extra domains to the coredns kubernetes plugin +# coredns_kubernetes_extra_domains: '' + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: true + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". 
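+## Editorial note: enforcing only 'pods' (the commented default below) is the
+## conservative choice; adding 'kube-reserved' or 'system-reserved' to the list
+## also requires the matching kube_reserved / system_reserved cgroup settings
+## further down in this file to be configured.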
+# kubelet_enforce_node_allocatable: pods + +## Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +# kubelet_runtime_cgroups: "/{{ kube_service_cgroups }}/{{ container_manager }}.service" +# kubelet_kubelet_cgroups: "/{{ kube_service_cgroups }}/kubelet.service" + +## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service" +# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +# Whether to run kubelet and container-engine daemons in a dedicated cgroup. +# kube_reserved: false +## Uncomment to override default values +## The following two items need to be set when kube_reserved is true +# kube_reserved_cgroups_for_service_slice: kube.slice +# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}" +# kube_memory_reserved: 256Mi +# kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" +# Reservation for control plane hosts +# kube_master_memory_reserved: 512Mi +# kube_master_cpu_reserved: 200m +# kube_master_ephemeral_storage_reserved: 2Gi +# kube_master_pid_reserved: "1000" + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +## The following two items need to be set when system_reserved is true +# system_reserved_cgroups_for_service_slice: system.slice +# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}" +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. +## See https://github.com/kubernetes-sigs/kubespray/issues/2141 +## Set this variable to true to get rid of this issue +volume_cross_zone_attachment: false +## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI, +## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI) +persistent_volumes_enabled: false + +## Container Engine Acceleration +## Enable container acceleration feature, for example use gpu acceleration in containers +# nvidia_accelerator_enabled: true +## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset. +## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2' +## Array with nvida_gpu_nodes, leave empty or comment if you don't want to install drivers. +## Labels and taints won't be set to nodes if they are not in the array. +# nvidia_gpu_nodes: +# - kube-gpu-001 +# nvidia_driver_version: "384.111" +## flavor can be tesla or gtx +# nvidia_gpu_flavor: gtx +## NVIDIA driver installer images. 
Change them if you have trouble accessing gcr.io. +# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2 +# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63 +## NVIDIA GPU device plugin image. +# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e" + +## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. +# tls_min_version: "" + +## Support tls cipher suites. +# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +# COMMENTS: Enable the auto renew certificates @ Sun Zhenyu +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: true +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +kubeadm_patches_dir: "{{ kube_config_dir }}/patches" +kubeadm_patches: [] +# See https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#patches +# Correspondance with this link +# patchtype = type +# target = target +# suffix -> managed automatically +# extension -> always "yaml" +# kubeadm_patches: +# - target: kube-apiserver|kube-controller-manager|kube-scheduler|etcd|kubeletconfiguration +# type: strategic(default)|json|merge +# patch: +# metadata: +# annotations: +# example.com/test: "true" +# labels: +# example.com/prod_level: "{{ prod_level }}" +# - ... +# Patches are applied in the order they are specified. + +# Set to true to remove the role binding to anonymous users created by kubeadm +remove_anonymous_access: false diff --git a/cluster/group_vars/k8s_cluster/k8s-net-calico.yml b/cluster/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 00000000..9089bac8 --- /dev/null +++ b/cluster/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,132 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: freeleaps-network-system + +## With calico it is possible to distributed routes with border routers of the datacenter. +## Warning : enabling router peering will disable calico's default behavior ('node mesh'). 
+## The subnets of each nodes will be distributed by the datacenter router +# peer_with_router: false + +# Enables Internet connectivity from containers +# nat_outgoing: true +# nat_outgoing_ipv6: false + +# Enables Calico CNI "host-local" IPAM plugin +# calico_ipam_host_local: true + +# add default ippool name +calico_pool_name: "freeleaps-network-ip-pool" + +# add default ippool blockSize +calico_pool_blocksize: 26 + +# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) +# calico_pool_cidr: 1.2.3.4/5 + +# add default ippool CIDR to CNI config +# calico_cni_pool: true + +# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set. +# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# Add default IPV6 IPPool CIDR to CNI config +# calico_cni_pool_ipv6: true + +# Global as_num (/calico/bgp/v1/global/as_num) +# global_as_num: "64512" + +# If doing peering with node-assigned asn where the globas does not match your nodes, you want this +# to be true. All other cases, false. +# calico_no_global_as_num: false + +# You can set MTU value here. If left undefined or empty, it will +# not be specified in calico CNI config, so Calico will use built-in +# defaults. The value should be a number, not a string. +# calico_mtu: 1500 + +# Configure the MTU to use for workload interfaces and tunnels. +# - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440) +# - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450) +# - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480) +# - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500) +# calico_veth_mtu: 1440 + +# Advertise Cluster IPs +# calico_advertise_cluster_ips: true + +# Advertise Service External IPs +# calico_advertise_service_external_ips: +# - x.x.x.x/24 +# - y.y.y.y/32 + +# Advertise Service LoadBalancer IPs +# calico_advertise_service_loadbalancer_ips: +# - x.x.x.x/24 +# - y.y.y.y/16 + +# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) +# calico_datastore: "kdd" + +# Choose Calico iptables backend: "Legacy", "Auto" or "NFT" +# calico_iptables_backend: "Auto" + +# Use typha (only with kdd) +# typha_enabled: false + +# Generate TLS certs for secure typha<->calico-node communication +# typha_secure: false + +# Scaling typha: 1 replica per 100 nodes is adequate +# Number of typha replicas +# typha_replicas: 1 + +# Set max typha connections +# typha_max_connections_lower_limit: 300 + +# Set calico network backend: "bird", "vxlan" or "none" +# bird enable BGP routing, required for ipip and no encapsulation modes +# calico_network_backend: vxlan + +# IP in IP and VXLAN is mutually exclusive modes. 
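+# Editorial sketch of a VXLAN-only setup, matching the commented defaults below:
+#   calico_ipip_mode: 'Never'
+#   calico_vxlan_mode: 'Always'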
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_ipip_mode: 'Never' + +# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never" +# calico_vxlan_mode: 'Always' + +# set VXLAN port and VNI +# calico_vxlan_vni: 4096 +# calico_vxlan_port: 4789 + +# Enable eBPF mode +# calico_bpf_enabled: false + +# If you want to use non default IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD for calico node set this option to one of: +# * can-reach=DESTINATION +# * interface=INTERFACE-REGEX +# see https://docs.projectcalico.org/reference/node/configuration +# calico_ip_auto_method: "interface=eth.*" +# calico_ip6_auto_method: "interface=eth.*" + +# Set FELIX_MTUIFACEPATTERN, Pattern used to discover the host’s interface for MTU auto-detection. +# see https://projectcalico.docs.tigera.io/reference/felix/configuration +# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)" + +# Choose the iptables insert mode for Calico: "Insert" or "Append". +# calico_felix_chaininsertmode: Insert + +# If you want use the default route interface when you use multiple interface with dynamique route (iproute2) +# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS +# calico_use_default_route_src_ipaddr: false + +# Enable calico traffic encryption with wireguard +# calico_wireguard_enabled: false + +# Under certain situations liveness and readiness probes may need tunning +# calico_node_livenessprobe_timeout: 10 +# calico_node_readinessprobe_timeout: 10 + +# Calico apiserver (only with kdd) +# calico_apiserver_enabled: false diff --git a/cluster/inventory.ini b/cluster/inventory.ini new file mode 100644 index 00000000..344de3d9 --- /dev/null +++ b/cluster/inventory.ini @@ -0,0 +1,8 @@ +# Configure 'ip' variable to bind kubernetes services on a different ip than the default iface +# We should set etcd_member_name for etcd cluster. The node that are not etcd members do not need to set the value, +# or can set the empty string value. +[kube_control_plane] + +[etcd:children] + +[kube_node] \ No newline at end of file diff --git a/cluster/manifests/freeleaps-controls-system/cert-manager/.gitkeep b/cluster/manifests/freeleaps-controls-system/cert-manager/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-controls-system/godaddy-webhook/.gitkeep b/cluster/manifests/freeleaps-controls-system/godaddy-webhook/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-controls-system/ingress-nginx/.gitkeep b/cluster/manifests/freeleaps-controls-system/ingress-nginx/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-devops-system/argo-cd/values.yaml b/cluster/manifests/freeleaps-devops-system/argo-cd/values.yaml new file mode 100644 index 00000000..3d4c6c1c --- /dev/null +++ b/cluster/manifests/freeleaps-devops-system/argo-cd/values.yaml @@ -0,0 +1,2294 @@ +## Argo CD configuration +## Ref: https://github.com/argoproj/argo-cd +## +# -- Provide a name in place of `argocd` +nameOverride: argocd +# -- Override the namespace +namespaceOverride: "freeleaps-devops-system" +# -- Create aggregated roles that extend existing cluster roles to interact with argo-cd resources +## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles +createAggregateRoles: false +# -- Create cluster roles for cluster-wide installation. 
+## Used when you manage applications in the same cluster where Argo CD runs +createClusterRoles: true + +## Custom resource configuration +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + +## Globally shared configuration +global: + # -- Default domain used by all components + ## Used for ingresses, certificates, SSO, notifications, etc. + domain: argo.mathmast.com + # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected. + revisionHistoryLimit: 3 + # Default image used by all components + image: + # -- If defined, a repository applied to all Argo CD deployments + repository: quay.io/argoproj/argocd + # -- If defined, a imagePullPolicy applied to all Argo CD deployments + imagePullPolicy: IfNotPresent + # Default logging options used by all components + logging: + # -- Set the global logging format. Either: `text` or `json` + format: text + # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error` + level: info + # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors. + addPrometheusAnnotations: false + # Default network policy rules used by all components + networkPolicy: + # -- Create NetworkPolicy objects for all components + create: false + # -- Default deny all ingress traffic + defaultDenyIngress: false + +## Argo Configs +configs: + # General Argo CD configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + cm: + # -- Create the argocd-cm configmap for [declarative setup] + create: true + # -- The name of tracking label used by Argo CD for resource pruning + application.instanceLabelKey: argocd.argoproj.io/instance + # -- Enable logs RBAC enforcement + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement + server.rbac.log.enforce.enable: false + # -- Enable exec feature in Argo UI + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource + exec.enabled: true + # -- Enable local admin user + ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user + admin.enabled: true + # -- Timeout to discover if a new manifests version got published to the repository + timeout.reconciliation: 180s + # -- Timeout to refresh application data as well as target manifests cache + timeout.hard.reconciliation: 0s + # -- Enable Status Badge + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/status-badge/ + statusbadge.enabled: true + # OIDC configuration as an alternative to dex (optional). + # oidc.config: | + # name: AzureAD + # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 + # clientID: CLIENT_ID + # clientSecret: $oidc.azuread.clientSecret + # rootCA: | + # -----BEGIN CERTIFICATE----- + # ... encoded certificate data here ... + # -----END CERTIFICATE----- + # requestedIDTokenClaims: + # groups: + # essential: true + # requestedScopes: + # - openid + # - profile + # - email + + # Argo CD configuration parameters + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml + params: + # -- Create the argocd-cmd-params-cm configmap + # If false, it is expected the configmap will be created by something else. 
+    create: true
+    ## Controller Properties
+    # -- Number of application status processors
+    controller.status.processors: 20
+    # -- Number of application operation processors
+    controller.operation.processors: 10
+    # -- Specifies timeout between application self heal attempts
+    controller.self.heal.timeout.seconds: 5
+    # -- Repo server RPC call timeout seconds.
+    controller.repo.server.timeout.seconds: 60
+    ## Server properties
+    # -- Run server without TLS
+    ## NOTE: This value should be set when you generate params by other means as it changes ports used by ingress template.
+    ## NOTE: We use an Ingress to terminate TLS and redirect non-TLS requests to the actual service.
+    server.insecure: true
+    # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /
+    server.basehref: /
+    # -- Directory path that contains additional static assets
+    server.staticassets: /shared/app
+    # -- Disable Argo CD RBAC for user authentication
+    server.disable.auth: false
+    # -- Enable GZIP compression
+    server.enable.gzip: true
+    # -- Enable proxy extension feature. (proxy extension is in Alpha phase)
+    server.enable.proxy.extension: false
+    # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "".
+    server.x.frame.options: sameorigin
+    ## Repo-server properties
+    # -- Limit on number of concurrent manifests generate requests. Any value less than 1 means no limit.
+    reposerver.parallelism.limit: 0
+    ## ApplicationSet Properties
+    # -- Modify how application is synced between the generator and the cluster. One of: `sync`, `create-only`, `create-update`, `create-delete`
+    applicationsetcontroller.policy: sync
+    # -- Enables use of the Progressive Syncs capability
+    applicationsetcontroller.enable.progressive.syncs: false
+    # -- JQ Path expression timeout
+    ## By default, the evaluation of a JQPathExpression is limited to one second.
+    ## If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression
+    ## that requires more time to evaluate, you can extend the timeout period.
+    controller.ignore.normalizer.jq.timeout: "1s"
+
+  # Argo CD RBAC policy configuration
+  ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md
+  rbac:
+    # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions.
+    # If false, it is expected the configmap will be created by something else.
+    # Argo CD will not work if there is no configmap created with the name above.
+    create: true
+    # -- OIDC scopes to examine during rbac enforcement (in addition to `sub` scope).
+    # The scope value can be a string, or a list of strings.
+    scopes: "[groups]"
+    # -- Matcher function for Casbin, `glob` for glob matcher and `regex` for regex matcher.
+    policy.matchMode: "glob"
+
+  # SSH known hosts for Git repositories
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#ssh-known-host-public-keys
+  ssh:
+    # -- Specifies if the argocd-ssh-known-hosts-cm configmap should be created by Helm.
+    create: true
+    # -- Known hosts to be added to the known host list by default.
+ # @default -- See [values.yaml] + knownHosts: | + [ssh.github.com]:443 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + [ssh.github.com]:443 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + [ssh.github.com]:443 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE= + bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M= + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + + # Argo CD sensitive data + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + secret: + # -- Create the argocd-secret + createSecret: true + # -- Bcrypt hashed admin password + ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with + ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` + argocdServerAdminPassword: "" + # -- Admin password modification time. Eg. `"2006-01-02T15:04:05Z"` + # @default -- `""` (defaults to current time) + argocdServerAdminPasswordMtime: "" + +## Application controller +controller: + # -- Application controller name string + name: application-controller + # -- The number of application controller pods to run. + # Additional replicas will cause sharding of managed clusters across number of replicas. + ## With dynamic cluster distribution turned on, sharding of the clusters will gracefully + ## rebalance if the number of replica's changes or one becomes unhealthy. (alpha) + replicas: 1 + # -- Enable dynamic cluster distribution (alpha) + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution + ## This is done using a deployment instead of a statefulSet + ## When replicas are added or removed, the sharding algorithm is re-run to ensure that the + ## clusters are distributed according to the algorithm. If the algorithm is well-balanced, + ## like round-robin, then the shards will be well-balanced. + dynamicClusterDistribution: false + # -- Application controller heartbeat time + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/#working-of-dynamic-distribution + heartbeatTime: 10 + # -- Maximum number of controller revisions that will be maintained in StatefulSet history + revisionHistoryLimit: 5 + ## Application controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the application controller + enabled: true + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "1" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `controller.pdb.minAvailable` + maxUnavailable: "0" + ## Application controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for application controller + # @default -- `""` (defaults not set if not specified i.e. 
+
+## Application controller
+controller:
+  # -- Application controller name string
+  name: application-controller
+  # -- The number of application controller pods to run.
+  # Additional replicas will cause sharding of managed clusters across the number of replicas.
+  ## With dynamic cluster distribution turned on, sharding of the clusters will gracefully
+  ## rebalance if the number of replicas changes or one becomes unhealthy. (alpha)
+  replicas: 1
+  # -- Enable dynamic cluster distribution (alpha)
+  # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution
+  ## This is done using a deployment instead of a statefulSet
+  ## When replicas are added or removed, the sharding algorithm is re-run to ensure that the
+  ## clusters are distributed according to the algorithm. If the algorithm is well-balanced,
+  ## like round-robin, then the shards will be well-balanced.
+  dynamicClusterDistribution: false
+  # -- Application controller heartbeat time
+  # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/#working-of-dynamic-distribution
+  heartbeatTime: 10
+  # -- Maximum number of controller revisions that will be maintained in StatefulSet history
+  revisionHistoryLimit: 5
+  ## Application controller Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the application controller
+    enabled: true
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: "1"
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Has higher precedence over `controller.pdb.minAvailable`
+    maxUnavailable: "0"
+  ## Application controller emptyDir volumes
+  emptyDir:
+    # -- EmptyDir size limit for application controller
+    # @default -- `""` (defaults not set if not specified i.e. no size limit)
+    sizeLimit: ""
+  # -- Resource limits and requests for the application controller pods
+  resources:
+    limits:
+      cpu: "1"
+      memory: "1Gi"
+    requests:
+      cpu: "250m"
+      memory: "512Mi"
+  # Application controller container ports
+  containerPorts:
+    # -- Metrics container port
+    metrics: 8082
+  # -- Host Network for application controller pods
+  hostNetwork: false
+  # -- Alternative DNS policy for application controller pods
+  dnsPolicy: "ClusterFirst"
+  # -- Application controller container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    runAsNonRoot: true
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  # Readiness probe for application controller
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
+  # -- Automount API credentials for the Service Account into the pod.
+  automountServiceAccountToken: true
+  serviceAccount:
+    # -- Create a service account for the application controller
+    create: true
+    # -- Service account name
+    name: argocd-application-controller
+    # -- Automount API credentials for the Service Account
+    automountServiceAccountToken: true
+  ## Application controller metrics configuration
+  metrics:
+    # -- Deploy metrics service
+    enabled: true
+    service:
+      # -- Metrics service type
+      type: ClusterIP
+      # -- Metrics service port
+      servicePort: 8082
+      # -- Metrics service port name
+      portName: http-metrics
+    serviceMonitor:
+      # -- Enable a prometheus ServiceMonitor
+      enabled: true
+      # -- Prometheus ServiceMonitor interval
+      interval: 30s
+      # -- Prometheus ServiceMonitor namespace
+      namespace: "freeleaps-monitoring-system"
+    rules:
+      # -- Deploy a PrometheusRule for the application controller
+      enabled: true
+      # -- PrometheusRule namespace
+      namespace: "freeleaps-monitoring-system" # "monitoring"
+      # -- PrometheusRule.Spec for the application controller
+      spec:
+        - alert: ArgoAppMissing
+          expr: |
+            absent(argocd_app_info) == 1
+          for: 15m
+          labels:
+            severity: critical
+          annotations:
+            summary: "[Argo CD] No reported applications"
+            description: >
+              Argo CD has not reported any application data for the past 15 minutes, which
+              means that it must be down or not functioning properly. This needs to be
+              resolved for this cluster to continue to maintain state.
+        - alert: ArgoAppNotSynced
+          expr: |
+            argocd_app_info{sync_status!="Synced"} == 1
+          for: 12h
+          labels:
+            severity: warning
+          annotations:
+            summary: "[{{`{{$labels.name}}`}}] Application not synchronized"
+            description: >
+              The application [{{`{{$labels.name}}`}}] has not been synchronized for over
+              12 hours, which means that the state of this cluster has drifted away from the
+              state inside Git.
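+
+# NOTE: to sanity-check the metrics/alerting wiring above, something like the following
+# can be used (a sketch; assumes the Prometheus Operator CRDs are installed and that
+# Argo CD runs in the `argocd` namespace -- adjust names to this cluster):
+#   kubectl -n freeleaps-monitoring-system get servicemonitors,prometheusrules
+#   kubectl -n argocd port-forward statefulset/argocd-application-controller 8082:8082 &
+#   curl -s http://localhost:8082/metrics | grep argocd_app_info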
+
+## Dex
+dex:
+  # FIXME: we need self-hosted dex if required @ Zhenyu Sun
+  enabled: false
+
+## Redis
+redis:
+  # -- Enable redis
+  enabled: true
+  # -- Redis name
+  name: argo-cd-dedicated-redis
+  ## Redis Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the Redis
+    enabled: true
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: "1"
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Has higher precedence over `redis.pdb.minAvailable`
+    maxUnavailable: "0"
+  ## Redis image
+  image:
+    # -- Redis repository
+    repository: public.ecr.aws/docker/library/redis
+    # -- Redis tag
+    tag: 7.4.1-alpine
+  ## Prometheus redis-exporter sidecar
+  exporter:
+    # -- Enable Prometheus redis-exporter sidecar
+    enabled: true
+    ## Prometheus redis-exporter image
+    image:
+      # -- Repository to use for the redis-exporter
+      repository: public.ecr.aws/bitnami/redis-exporter
+      # -- Tag to use for the redis-exporter
+      tag: 1.58.0
+    # -- Redis exporter security context
+    # @default -- See [values.yaml]
+    containerSecurityContext:
+      runAsNonRoot: true
+      readOnlyRootFilesystem: true
+      allowPrivilegeEscalation: false
+      seccompProfile:
+        type: RuntimeDefault
+      capabilities:
+        drop:
+          - ALL
+    ## Probes for Redis exporter (optional)
+    ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+    readinessProbe:
+      # -- Enable Kubernetes readiness probe for Redis exporter (optional)
+      enabled: true
+      # -- Number of seconds after the container has started before [probe] is initiated
+      initialDelaySeconds: 30
+      # -- How often (in seconds) to perform the [probe]
+      periodSeconds: 15
+      # -- Number of seconds after which the [probe] times out
+      timeoutSeconds: 15
+      # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+      successThreshold: 1
+      # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+      failureThreshold: 5
+    livenessProbe:
+      # -- Enable Kubernetes liveness probe for Redis exporter
+      enabled: true
+      # -- Number of seconds after the container has started before [probe] is initiated
+      initialDelaySeconds: 30
+      # -- How often (in seconds) to perform the [probe]
+      periodSeconds: 15
+      # -- Number of seconds after which the [probe] times out
+      timeoutSeconds: 15
+      # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+      successThreshold: 1
+      # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+      failureThreshold: 5
+    # -- Resource limits and requests for redis-exporter sidecar
+    resources:
+      limits:
+        cpu: "50m"
+        memory: "64Mi"
+      requests:
+        cpu: "10m"
+        memory: "32Mi"
+  ## Probes for Redis server (optional)
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Enable Kubernetes readiness probe for Redis server
+    enabled: true
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 30
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 15
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 15
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 5
+  livenessProbe:
+    # -- Enable Kubernetes liveness probe for Redis server
+    enabled: true
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 30
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 15
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 15
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 5
+  # -- Resource limits and requests for redis
+  resources:
+    limits:
+      cpu: "500m"
+      memory: "512Mi"
+    requests:
+      cpu: "100m"
+      memory: "128Mi"
+  # -- Redis pod-level security context
+  # @default -- See [values.yaml]
+  securityContext:
+    runAsNonRoot: true
+    runAsUser: 999
+    seccompProfile:
+      type: RuntimeDefault
+  # Redis container ports
+  containerPorts:
+    # -- Redis container port
+    redis: 6379
+    # -- Metrics container port
+    metrics: 9121
+  # -- Alternative DNS policy for Redis server pods
+  dnsPolicy: "ClusterFirst"
+  # -- Redis container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop:
+        - ALL
+  # -- Redis service port
+  servicePort: 6379
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
+  # -- Automount API credentials for the Service Account into the pod.
+  automountServiceAccountToken: true
+  serviceAccount:
+    # -- Create a service account for the redis pod
+    create: false
+    # -- Service account name for redis pod
+    name: "argo-cd-dedicated-redis-sa"
+    # -- Automount API credentials for the Service Account
+    automountServiceAccountToken: false
+
+  metrics:
+    # -- Deploy metrics service
+    enabled: true
+    # Redis metrics service configuration
+    service:
+      # -- Metrics service type
+      type: ClusterIP
+      # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+      clusterIP: None
+      # -- Metrics service port
+      servicePort: 9121
+      # -- Metrics service port name
+      portName: http-metrics
+    serviceMonitor:
+      # -- Enable a prometheus ServiceMonitor
+      enabled: true
+      # -- Interval at which metrics should be scraped
+      interval: 30s
+      # -- Prometheus ServiceMonitor namespace
+      namespace: "freeleaps-monitoring-system"
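+
+# NOTE: the redis-exporter sidecar above exposes Prometheus metrics on port 9121;
+# a quick smoke test looks like this (a sketch; the workload name and the `argocd`
+# namespace are assumptions based on `redis.name` above):
+#   kubectl -n argocd port-forward deploy/argo-cd-dedicated-redis 9121:9121 &
+#   curl -s http://localhost:9121/metrics | grep redis_up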
+
+redisSecretInit:
+  # -- Enable Redis secret initialization. If disabled, the secret must be provisioned by alternative methods
+  enabled: true
+  # -- Redis secret-init name
+  name: argo-cd-dedicated-redis-secret-init
+  # -- Redis secret-init container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop:
+        - ALL
+    readOnlyRootFilesystem: true
+    runAsNonRoot: true
+    seccompProfile:
+      type: RuntimeDefault
+  serviceAccount:
+    # -- Create a service account for the Redis secret-init job
+    create: false
+
+## Server
+server:
+  # -- Argo CD server name
+  name: server
+  # -- The number of server pods to run
+  replicas: 1
+  ## Argo CD server Horizontal Pod Autoscaler
+  autoscaling:
+    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server
+    enabled: false
+  ## Argo CD server Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the Argo CD server
+    enabled: true
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: "1"
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Has higher precedence over `server.pdb.minAvailable`
+    maxUnavailable: "0"
+  ## Argo CD server emptyDir volumes
+  emptyDir:
+    # -- EmptyDir size limit for the Argo CD server
+    # @default -- `""` (defaults not set if not specified i.e. no size limit)
+    sizeLimit: ""
+  # -- Resource limits and requests for the Argo CD server
+  resources:
+    limits:
+      cpu: "1"
+      memory: "512Mi"
+    requests:
+      cpu: "250m"
+      memory: "256Mi"
+  # Server container ports
+  containerPorts:
+    # -- Server container port
+    server: 8080
+    # -- Metrics container port
+    metrics: 8083
+  # -- Host Network for Server pods
+  hostNetwork: false
+  # -- Alternative DNS policy for Server pods
+  dnsPolicy: "ClusterFirst"
+  # -- Server container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    runAsNonRoot: true
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## Readiness and liveness probes for the Argo CD server
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+  livenessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
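+
+  ## Example: the `certificate` block below can be wired to cert-manager like this
+  ## (a sketch; assumes a ClusterIssuer named `letsencrypt` already exists in the
+  ## cluster and that `argocd.example.com` is a placeholder domain):
+  # certificate:
+  #   enabled: true
+  #   domain: argocd.example.com
+  #   issuer:
+  #     kind: ClusterIssuer
+  #     name: letsencrypt
+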
+  # TLS certificate configuration via cert-manager
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+  certificate:
+    # -- Deploy a Certificate resource (requires cert-manager)
+    enabled: false
+    # -- Certificate primary domain (commonName)
+    # @default -- `""` (defaults to global.domain)
+    domain: ""
+    # -- Certificate Subject Alternate Names (SANs)
+    additionalHosts: []
+    # -- The requested 'duration' (i.e. lifetime) of the certificate.
+    # @default -- `""` (defaults to 2160h = 90d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    duration: ""
+    # -- How long before the expiry a certificate should be renewed.
+    # @default -- `""` (defaults to 360h = 15d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    renewBefore: ""
+    # Certificate issuer
+    ## Ref: https://cert-manager.io/docs/concepts/issuer
+    issuer:
+      # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+      group: ""
+      # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+      kind: ""
+      # -- Certificate issuer name. Eg. `letsencrypt`
+      name: ""
+    # Private key of the certificate
+    privateKey:
+      # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+      rotationPolicy: Never
+      # -- The private key cryptography standards (PKCS) encoding for private key. Either: `PKCS1` or `PKCS8`
+      encoding: PKCS1
+      # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+      algorithm: RSA
+      # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+      size: 2048
+    # -- Annotations to be applied to the Server Certificate
+    annotations: {}
+    # -- Usages for the certificate
+    ## Ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.KeyUsage
+    usages: []
+    # -- Annotations that allow the certificate to be composed from data residing in existing Kubernetes Resources
+    secretTemplateAnnotations: {}
+
+  # TLS certificate configuration via Secret
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+  certificateSecret:
+    # -- Create argocd-server-tls secret
+    enabled: false
+    # -- Annotations to be added to argocd-server-tls secret
+    annotations: {}
+    # -- Labels to be added to argocd-server-tls secret
+    labels: {}
+    # -- Private Key of the certificate
+    key: ''
+    # -- Certificate data
+    crt: ''
+
+  ## Server service configuration
+  service:
+    # -- Server service annotations
+    annotations: {}
+    # -- Server service labels
+    labels: {}
+    # -- Server service type
+    type: ClusterIP
+    # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort")
+    nodePortHttp: 30080
+    # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort")
+    nodePortHttps: 30443
+    # -- Server service http port
+    servicePortHttp: 80
+    # -- Server service https port
+    servicePortHttps: 443
+    # -- Server service http port name, can be used to route traffic via istio
+    servicePortHttpName: http
+    # -- Server service https port name, can be used to route traffic via istio
+    servicePortHttpsName: https
+    # -- Server service https port appProtocol
+    ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol
+    servicePortHttpsAppProtocol: ""
+    # -- The class of the load balancer implementation
+    loadBalancerClass: ""
+    # -- LoadBalancer will get created with
the IP specified in this field + loadBalancerIP: "" + # -- Source IP ranges to allow access to service from + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + loadBalancerSourceRanges: [] + # -- Server service external IPs + externalIPs: [] + # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: Cluster + # -- Used to maintain session affinity. Supports `ClientIP` and `None` + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + + ## Server metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8083 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used. + scrapeTimeout: "" + # -- When true, honorLabels preserves the metric’s labels when they collide with the target’s labels. + honorLabels: false + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + # -- Automount API credentials for the Service Account into the pod. + automountServiceAccountToken: true + + serviceAccount: + # -- Create server service account + create: true + # -- Server service account name + name: argocd-server + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # Argo CD server ingress configuration + ingress: + # -- Enable an ingress resource for the Argo CD server + enabled: false + # -- Specific implementation for ingress controller. 
One of `generic`, `aws` or `gke`
+    ## Additional configuration might be required in related configuration sections
+    controller: generic
+    # -- Additional ingress labels
+    labels: {}
+    # -- Additional ingress annotations
+    ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#option-1-ssl-passthrough
+    annotations: {}
+    # nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+    # nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+
+    # -- Defines which ingress controller will implement the resource
+    ingressClassName: ""
+
+    # -- Argo CD server hostname
+    # @default -- `""` (defaults to global.domain)
+    hostname: ""
+
+    # -- The path to Argo CD server
+    path: /
+
+    # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+    pathType: Prefix
+
+    # -- Enable TLS configuration for the hostname defined at `server.ingress.hostname`
+    ## TLS certificate will be retrieved from a TLS secret `argocd-server-tls`
+    ## You can create this secret via `certificate` or `certificateSecret` option
+    tls: false
+
+    # -- The list of additional hostnames to be covered by ingress record
+    # @default -- `[]` (See [values.yaml])
+    extraHosts: []
+    # - name: argocd.example.com
+    #   path: /
+
+    # -- Additional ingress paths
+    # @default -- `[]` (See [values.yaml])
+    ## Note: Supports use of custom Helm templates
+    extraPaths: []
+    # - path: /*
+    #   pathType: Prefix
+    #   backend:
+    #     service:
+    #       name: ssl-redirect
+    #       port:
+    #         name: use-annotation
+
+    # -- Additional ingress rules
+    # @default -- `[]` (See [values.yaml])
+    ## Note: Supports use of custom Helm templates
+    extraRules: []
+    # - http:
+    #     paths:
+    #     - path: /
+    #       pathType: Prefix
+    #       backend:
+    #         service:
+    #           name: '{{ include "argo-cd.server.fullname" . }}'
+    #           port:
+    #             name: '{{ .Values.server.service.servicePortHttpsName }}'
+
+    # -- Additional TLS configuration
+    # @default -- `[]` (See [values.yaml])
+    extraTls: []
+    # - hosts:
+    #   - argocd.example.com
+    #   secretName: your-certificate-name
+
+    # AWS specific options for Application Load Balancer
+    # Applies only when `server.ingress.controller` is set to `aws`
+    ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#aws-application-load-balancers-albs-and-classic-elb-http-mode
+    aws:
+      # -- Backend protocol version for the AWS ALB gRPC service
+      ## This tells AWS to send traffic from the ALB using gRPC.
+      ## For more information: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#health-check-settings
+      backendProtocolVersion: GRPC
+      # -- Service type for the AWS ALB gRPC service
+      ## Can be of type NodePort or ClusterIP depending on which mode you are running.
+ ## Instance mode needs type NodePort, IP mode needs type ClusterIP + ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic + serviceType: NodePort + + # Google specific options for Google Application Load Balancer + # Applies only when `server.ingress.controller` is set to `gke` + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#google-cloud-load-balancers-with-kubernetes-ingress + gke: + # -- Google [BackendConfig] resource, for use with the GKE Ingress Controller + # @default -- `{}` (See [values.yaml]) + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters + backendConfig: {} + # iap: + # enabled: true + # oauthclientCredentials: + # secretName: argocd-secret + + # -- Google [FrontendConfig] resource, for use with the GKE Ingress Controller + # @default -- `{}` (See [values.yaml]) + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters + frontendConfig: {} + # redirectToHttps: + # enabled: true + # responseCodeName: RESPONSE_CODE + + # Managed GKE certificate for ingress hostname + managedCertificate: + # -- Create ManagedCertificate resource and annotations for Google Load balancer + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs + create: true + # -- Additional domains for ManagedCertificate resource + extraDomains: [] + # - argocd.example.com + + # Dedicated gRPC ingress for ingress controllers that supports only single backend protocol per Ingress resource + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#option-2-multiple-ingress-objects-and-hosts + ingressGrpc: + # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress] + enabled: false + # -- Additional ingress annotations for dedicated [gRPC-ingress] + annotations: {} + # -- Additional ingress labels for dedicated [gRPC-ingress] + labels: {} + # -- Defines which ingress controller will implement the resource [gRPC-ingress] + ingressClassName: "" + + # -- Argo CD server hostname for dedicated [gRPC-ingress] + # @default -- `""` (defaults to grpc.`server.ingress.hostname`) + hostname: "" + + # -- Argo CD server ingress path for dedicated [gRPC-ingress] + path: / + + # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + + # -- Enable TLS configuration for the hostname defined at `server.ingressGrpc.hostname` + ## TLS certificate will be retrieved from a TLS secret with name: `argocd-server-grpc-tls` + tls: false + + # -- The list of additional hostnames to be covered by ingress record + # @default -- `[]` (See [values.yaml]) + extraHosts: [] + # - name: grpc.argocd.example.com + # path: / + + # -- Additional ingress paths for dedicated [gRPC-ingress] + # @default -- `[]` (See [values.yaml]) + ## Note: Supports use of custom Helm templates + extraPaths: [] + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Additional ingress rules + # @default -- `[]` (See [values.yaml]) + ## Note: Supports use of custom Helm templates + extraRules: [] + # - http: + # paths: + # - path: / + # pathType: Prefix + # backend: + # service: + # name: '{{ include "argo-cd.server.fullname" . 
}}'
+    #           port:
+    #             name: '{{ .Values.server.service.servicePortHttpName }}'
+
+    # -- Additional TLS configuration for dedicated [gRPC-ingress]
+    # @default -- `[]` (See [values.yaml])
+    extraTls: []
+    # - secretName: your-certificate-name
+    #   hosts:
+    #   - argocd.example.com
+
+  # Create an OpenShift Route with SSL passthrough for UI and CLI
+  # Consider setting 'hostname' e.g. https://argocd.apps-crc.testing/ using your default Ingress Controller domain
+  # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain:
+  # If 'hostname' is an empty string "" OpenShift will create a hostname for you.
+  route:
+    # -- Enable an OpenShift Route for the Argo CD server
+    enabled: false
+    # -- OpenShift Route annotations
+    annotations: {}
+    # -- Hostname of the OpenShift Route
+    hostname: ""
+    # -- Termination type of the OpenShift Route
+    termination_type: passthrough
+    # -- Termination policy of the OpenShift Route
+    termination_policy: None
+
+  ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource.
+  ## Defaults to off
+  clusterRoleRules:
+    # -- Enable custom rules for the server's ClusterRole resource
+    enabled: false
+    # -- List of custom rules for the server's ClusterRole resource
+    rules: []
+
+## Repo Server
+repoServer:
+  # -- Repo server name
+  name: repo-server
+
+  # -- The number of repo server pods to run
+  replicas: 1
+
+  # -- Runtime class name for the repo server
+  # @default -- `""` (defaults to global.runtimeClassName)
+  runtimeClassName: ""
+
+  ## Repo server Horizontal Pod Autoscaler
+  autoscaling:
+    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server
+    enabled: false
+    # -- Minimum number of replicas for the repo server [HPA]
+    minReplicas: 1
+    # -- Maximum number of replicas for the repo server [HPA]
+    maxReplicas: 5
+    # -- Average CPU utilization percentage for the repo server [HPA]
+    targetCPUUtilizationPercentage: 50
+    # -- Average memory utilization percentage for the repo server [HPA]
+    targetMemoryUtilizationPercentage: 50
+    # -- Configures the scaling behavior of the target in both Up and Down directions.
+    behavior: {}
+    # scaleDown:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 1
+    #     periodSeconds: 180
+    # scaleUp:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 2
+    #     periodSeconds: 60
+    # -- Configures custom HPA metrics for the Argo CD repo server
+    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+    metrics: []
+
+  ## Repo server Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the repo server
+    enabled: false
+    # -- Labels to be added to repo server pdb
+    labels: {}
+    # -- Annotations to be added to repo server pdb
+    annotations: {}
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `repoServer.pdb.minAvailable` + maxUnavailable: "" + + ## Repo server image + image: + # -- Repository to use for the repo server + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the repo server + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the repo server + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to repo server + extraArgs: [] + + # -- Environment variables to pass to repo server + env: [] + + # -- envFrom to pass to repo server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Specify postStart and preStop lifecycle hooks for your argo-repo-server container + lifecycle: {} + + # -- Additional containers to be added to the repo server pod + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/ + ## Note: Supports use of custom Helm templates + extraContainers: [] + # - name: cmp-my-plugin + # command: + # - "/var/run/argocd/argocd-cmp-server" + # image: busybox + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: my-plugin.yaml + # name: argocd-cmp-cm + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. + # - mountPath: /tmp + # name: cmp-tmp + # - name: cmp-my-plugin2 + # command: + # - "/var/run/argocd/argocd-cmp-server" + # image: busybox + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: my-plugin2.yaml + # name: argocd-cmp-cm + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. 
+  #       - mountPath: /tmp
+  #         name: cmp-tmp
+
+  # -- Init containers to add to the repo server pods
+  initContainers: []
+
+  # -- Additional volumeMounts to the repo server main container
+  volumeMounts: []
+
+  # -- Additional volumes to the repo server pod
+  volumes: []
+  # - name: argocd-cmp-cm
+  #   configMap:
+  #     name: argocd-cmp-cm
+  # - name: cmp-tmp
+  #   emptyDir: {}
+
+  # -- Volumes to be used in replacement of emptydir on default volumes
+  existingVolumes: {}
+  # gpgKeyring:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-keyring
+  # helmWorkingDir:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-workdir
+  # tmp:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-tmp
+  # varFiles:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-varfiles
+  # plugins:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-plugins
+
+  ## RepoServer emptyDir volumes
+  emptyDir:
+    # -- EmptyDir size limit for repo server
+    # @default -- `""` (defaults not set if not specified i.e. no size limit)
+    sizeLimit: ""
+    # sizeLimit: "1Gi"
+
+  # -- Toggle the usage of an ephemeral Helm working directory
+  useEphemeralHelmWorkingDir: true
+
+  # -- Annotations to be added to repo server Deployment
+  deploymentAnnotations: {}
+
+  # -- Annotations to be added to repo server pods
+  podAnnotations: {}
+
+  # -- Labels to be added to repo server pods
+  podLabels: {}
+
+  # -- Resource limits and requests for the repo server pods
+  resources: {}
+  # limits:
+  #   cpu: 50m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 10m
+  #   memory: 64Mi
+
+  # Repo server container ports
+  containerPorts:
+    # -- Repo server container port
+    server: 8081
+    # -- Metrics container port
+    metrics: 8084
+
+  # -- Host Network for Repo server pods
+  hostNetwork: false
+
+  # -- [DNS configuration]
+  dnsConfig: {}
+  # -- Alternative DNS policy for Repo server pods
+  dnsPolicy: "ClusterFirst"
+
+  # -- Repo server container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    runAsNonRoot: true
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+
+  ## Readiness and liveness probes for the repo server
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+
+  livenessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
+
+  # -- [Node selector]
+  # @default -- `{}` (defaults to global.nodeSelector)
+  nodeSelector: {}
+
+  # -- [Tolerations] for use with node taints
+  # @default -- `[]` (defaults to global.tolerations)
+  tolerations: []
+
+  # -- Assign custom [affinity] rules to the deployment
+  # @default -- `{}` (defaults to global.affinity preset)
+  affinity: {}
+
+  # -- Assign custom [TopologySpreadConstraints] rules to the repo server
+  # @default -- `[]` (defaults to global.topologySpreadConstraints)
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the repo server Deployment
+  deploymentStrategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 25%
+  #   maxUnavailable: 25%
+
+  # -- Priority class for the repo server pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # TLS certificate configuration via Secret
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server
+  ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart the repo server automatically without extra controllers.
+  certificateSecret:
+    # -- Create argocd-repo-server-tls secret
+    enabled: false
+    # -- Annotations to be added to argocd-repo-server-tls secret
+    annotations: {}
+    # -- Labels to be added to argocd-repo-server-tls secret
+    labels: {}
+    # -- Certificate authority. Required for self-signed certificates.
+    ca: ''
+    # -- Certificate private key
+    key: ''
+    # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc)
+    crt: ''
+
+  ## Repo server service configuration
+  service:
+    # -- Repo server service annotations
+    annotations: {}
+    # -- Repo server service labels
+    labels: {}
+    # -- Repo server service port
+    port: 8081
+    # -- Repo server service port name
+    portName: tcp-repo-server
+
+  ## Repo server metrics service configuration
+  metrics:
+    # -- Deploy metrics service
+    enabled: false
+    service:
+      # -- Metrics service type
+      type: ClusterIP
+      # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+      clusterIP: ""
+      # -- Metrics service annotations
+      annotations: {}
+      # -- Metrics service labels
+      labels: {}
+      # -- Metrics service port
+      servicePort: 8084
+      # -- Metrics service port name
+      portName: http-metrics
+    serviceMonitor:
+      # -- Enable a prometheus ServiceMonitor
+      enabled: false
+      # -- Prometheus ServiceMonitor interval
+      interval: 30s
+      # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value, in which case the latter is used.
+      scrapeTimeout: ""
+      # -- When true, honorLabels preserves the metric’s labels when they collide with the target’s labels.
+ honorLabels: false + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Enable Custom Rules for the Repo server's Cluster Role resource + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the Repo server's Cluster Role resource + enabled: false + # -- List of custom rules for the Repo server's Cluster Role resource + rules: [] + + # -- Automount API credentials for the Service Account into the pod. + automountServiceAccountToken: true + + ## Repo server service account + ## If create is set to true, make sure to uncomment the name and update the rbac section below + serviceAccount: + # -- Create repo server service account + create: true + # -- Repo server service account name + name: "" # "argocd-repo-server" + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Repo server rbac rules + rbac: [] + # - apiGroups: + # - argoproj.io + # resources: + # - applications + # verbs: + # - get + # - list + # - watch + +## ApplicationSet controller +applicationSet: + # -- ApplicationSet controller name string + name: applicationset-controller + + # -- The number of ApplicationSet controller pods to run + replicas: 1 + + # -- Runtime class name for the ApplicationSet controller + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + ## ApplicationSet controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller + enabled: false + # -- Labels to be added to ApplicationSet controller pdb + labels: {} + # -- Annotations to be added to ApplicationSet controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `applicationSet.pdb.minAvailable` + maxUnavailable: "" + + ## ApplicationSet controller image + image: + # -- Repository to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the ApplicationSet controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- If defined, uses a Secret to pull an image from a private Docker registry or repository. 
+ # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- ApplicationSet controller command line flags + extraArgs: [] + + # -- Environment variables to pass to the ApplicationSet controller + extraEnv: [] + # - name: "MY_VAR" + # value: "value" + + # -- envFrom to pass to the ApplicationSet controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + ## ApplicationSet controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for applicationSet controller + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + + ## Metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8080 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used. + scrapeTimeout: "" + # -- When true, honorLabels preserves the metric’s labels when they collide with the target’s labels. + honorLabels: false + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## ApplicationSet service configuration + service: + # -- ApplicationSet service annotations + annotations: {} + # -- ApplicationSet service labels + labels: {} + # -- ApplicationSet service type + type: ClusterIP + # -- ApplicationSet service port + port: 7000 + # -- ApplicationSet service port name + portName: http-webhook + + # -- Automount API credentials for the Service Account into the pod. 
+ automountServiceAccountToken: true + + serviceAccount: + # -- Create ApplicationSet controller service account + create: true + # -- ApplicationSet controller service account name + name: argocd-applicationset-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Annotations to be added to ApplicationSet controller Deployment + deploymentAnnotations: {} + + # -- Annotations for the ApplicationSet controller pods + podAnnotations: {} + + # -- Labels for the ApplicationSet controller pods + podLabels: {} + + # -- Resource limits and requests for the ApplicationSet controller pods. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # ApplicationSet controller container ports + containerPorts: + # -- Metrics container port + metrics: 8080 + # -- Probe container port + probe: 8081 + # -- Webhook container port + webhook: 7000 + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for ApplicationSet controller pods + dnsPolicy: "ClusterFirst" + + # -- ApplicationSet controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for ApplicationSet controller (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes liveness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + livenessProbe: + # -- Enable Kubernetes liveness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the ApplicationSet controller + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the ApplicationSet controller Deployment
+  deploymentStrategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 25%
+  #   maxUnavailable: 25%
+
+  # -- Priority class for the ApplicationSet controller pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # TLS certificate configuration via cert-manager
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-configuration
+  certificate:
+    # -- Deploy a Certificate resource (requires cert-manager)
+    enabled: false
+    # -- Certificate primary domain (commonName)
+    # @default -- `""` (defaults to global.domain)
+    domain: ""
+    # -- Certificate Subject Alternate Names (SANs)
+    additionalHosts: []
+    # -- The requested 'duration' (i.e. lifetime) of the certificate.
+    # @default -- `""` (defaults to 2160h = 90d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    duration: ""
+    # -- How long before the expiry a certificate should be renewed.
+    # @default -- `""` (defaults to 360h = 15d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    renewBefore: ""
+    # Certificate issuer
+    ## Ref: https://cert-manager.io/docs/concepts/issuer
+    issuer:
+      # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+      group: ""
+      # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+      kind: ""
+      # -- Certificate issuer name. Eg. `letsencrypt`
+      name: ""
+    # Private key of the certificate
+    privateKey:
+      # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+      rotationPolicy: Never
+      # -- The private key cryptography standards (PKCS) encoding for private key. Either: `PKCS1` or `PKCS8`
+      encoding: PKCS1
+      # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+      algorithm: RSA
+      # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+      size: 2048
+    # -- Annotations to be applied to the ApplicationSet Certificate
+    annotations: {}
+
+  ## Ingress for the Git Generator webhook
+  ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration
+  ingress:
+    # -- Enable an ingress resource for ApplicationSet webhook
+    enabled: false
+    # -- Additional ingress labels
+    labels: {}
+    # -- Additional ingress annotations
+    annotations: {}
+
+    # -- Defines which ingress controller will implement the resource
+    ingressClassName: ""
+
+    # -- Argo CD ApplicationSet hostname
+    # @default -- `""` (defaults to global.domain)
+    hostname: ""
+
+    # -- Ingress path for the ApplicationSet webhook
+    path: /api/webhook
+
+    # -- Ingress path type.
One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + + # -- Enable TLS configuration for the hostname defined at `applicationSet.webhook.ingress.hostname` + ## TLS certificate will be retrieved from a TLS secret with name:`argocd-applicationset-controller-tls` + tls: false + + # -- The list of additional hostnames to be covered by ingress record + # @default -- `[]` (See [values.yaml]) + extraHosts: [] + # - name: argocd.example.com + # path: / + + # -- Additional ingress paths + # @default -- `[]` (See [values.yaml]) + extraPaths: [] + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Additional ingress rules + # @default -- `[]` (See [values.yaml]) + ## Note: Supports use of custom Helm templates + extraRules: [] + # - http: + # paths: + # - path: /api/webhook + # pathType: Prefix + # backend: + # service: + # name: '{{ include "argo-cd.applicationSet.fullname" . }}' + # port: + # name: '{{ .Values.applicationSet.service.portName }}' + + # -- Additional ingress TLS configuration + # @default -- `[]` (See [values.yaml]) + extraTls: [] + # - secretName: argocd-applicationset-tls + # hosts: + # - argocd-applicationset.example.com + # -- Enable ApplicationSet in any namespace feature + allowAnyNamespace: false +## Notifications controller +notifications: + # -- Enable notifications controller + enabled: true + + # -- Notifications controller name string + name: notifications-controller + + # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates + # @default -- `""` (defaults to https://`global.domain`) + argocdUrl: "" + + # -- Runtime class name for the notifications controller + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + ## Notifications controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the notifications controller + enabled: false + # -- Labels to be added to notifications controller pdb + labels: {} + # -- Annotations to be added to notifications controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `notifications.pdb.minAvailable` + maxUnavailable: "" + + ## Notifications controller image + image: + # -- Repository to use for the notifications controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the notifications controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the notifications controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Notifications controller log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- Notifications controller log level. 
One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + # -- Extra arguments to provide to the notifications controller + extraArgs: [] + + # -- Additional container environment variables + extraEnv: [] + + # -- envFrom to pass to the notifications controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the notifications controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the notifications controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + # -- Define user-defined context + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/templates/#defining-user-defined-context + context: {} + # region: east + # environmentName: staging + + secret: + # -- Whether helm chart creates notifications controller secret + ## If true, will create a secret with the name below. Otherwise, will assume existence of a secret with that name. + create: true + + # -- notifications controller Secret name + name: "argocd-notifications-secret" + + # -- key:value pairs of annotations to be added to the secret + annotations: {} + + # -- key:value pairs of labels to be added to the secret + labels: {} + + # -- Generic key:value pairs to be inserted into the secret + ## Can be used for templates, notification services etc. Some examples given below. + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/overview/ + items: {} + # slack-token: + # # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/slack/ + + # grafana-apiKey: + # # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/grafana/ + + # webhooks-github-token: + + # email-username: + # email-password: + # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/email/ + + metrics: + # -- Enables prometheus metrics server + enabled: false + # -- Metrics port + port: 9001 + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- When true, honorLabels preserves the metric’s labels when they collide with the target’s labels. 
+      # -- When true, honorLabels preserves the metric's labels when they collide with the target's labels
+      honorLabels: false
+      # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+      relabelings: []
+      # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+      metricRelabelings: []
+
+  # -- Configures notification services such as Slack, email or a custom webhook
+  # @default -- See [values.yaml]
+  ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/overview/
+  notifiers: {}
+  # service.slack: |
+  #   token: $slack-token
+
+  # -- Annotations to be applied to the notifications controller Deployment
+  deploymentAnnotations: {}
+
+  # -- Annotations to be applied to the notifications controller Pods
+  podAnnotations: {}
+
+  # -- Labels to be applied to the notifications controller Pods
+  podLabels: {}
+
+  # -- Resource limits and requests for the notifications controller
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+  # Notifications controller container ports
+  containerPorts:
+    # -- Metrics container port
+    metrics: 9001
+
+  # -- [DNS configuration]
+  dnsConfig: {}
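+  ## e.g. (illustrative only -- standard Kubernetes PodDNSConfig fields):
+  # options:
+  #   - name: ndots
+  #     value: "2"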
+  # -- Alternative DNS policy for notifications controller Pods
+  dnsPolicy: "ClusterFirst"
+
+  # -- Notifications controller container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    runAsNonRoot: true
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+      - ALL
+
+  ## Probes for notifications controller Pods (optional)
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Enable Kubernetes readiness probe for notifications controller Pods
+    enabled: false
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+
+  livenessProbe:
+    # -- Enable Kubernetes liveness probe for notifications controller Pods
+    enabled: false
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
+
+  # -- [Node selector]
+  # @default -- `{}` (defaults to global.nodeSelector)
+  nodeSelector: {}
+
+  # -- [Tolerations] for use with node taints
+  # @default -- `[]` (defaults to global.tolerations)
+  tolerations: []
+
+  # -- Assign custom [affinity] rules
+  # @default -- `{}` (defaults to global.affinity preset)
+  affinity: {}
+
+  # -- Assign custom [TopologySpreadConstraints] rules to the notifications controller
+  # @default -- `[]` (defaults to global.topologySpreadConstraints)
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the notifications controller Deployment
+  deploymentStrategy:
+    type: Recreate
+
+  # -- Priority class for the notifications controller pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # -- Automount API credentials for the Service Account into the pod.
+  automountServiceAccountToken: true
+
+  serviceAccount:
+    # -- Create notifications controller service account
+    create: true
+    # -- Notifications controller service account name
+    name: argocd-notifications-controller
+    # -- Annotations applied to created service account
+    annotations: {}
+    # -- Labels applied to created service account
+    labels: {}
+    # -- Automount API credentials for the Service Account
+    automountServiceAccountToken: true
+
+  cm:
+    # -- Whether the Helm chart creates the notifications controller config map
+    create: true
+
+  ## Enable this and set `rules` to whatever custom rules you want for the ClusterRole resource.
+  ## Defaults to off
+  clusterRoleRules:
+    # -- List of custom rules for the notifications controller's ClusterRole resource
+    rules: []
+
+  # -- Contains centrally managed global application subscriptions
+  ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/subscriptions/
+  subscriptions: []
+  # # subscription for on-sync-status-unknown trigger notifications
+  # - recipients:
+  #   - slack:test2
+  #   - email:test@gmail.com
+  #   triggers:
+  #   - on-sync-status-unknown
+  # # subscription restricted to applications with matching labels only
+  # - recipients:
+  #   - slack:test3
+  #   selector: test=true
+  #   triggers:
+  #   - on-sync-status-unknown
+
+  # -- The notification template is used to generate the notification content
+  ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/templates/
+  templates: {}
+  # template.app-deployed: |
+  #   email:
+  #     subject: New version of an application {{.app.metadata.name}} is up and running.
+  #     message: |
+  #       {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests.
+  #   slack:
+  #     attachments: |
+  #       [{
+  #         "title": "{{ .app.metadata.name}}",
+  #         "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+  #         "color": "#18be52",
+  #         "fields": [
+  #         {
+  #           "title": "Sync Status",
+  #           "value": "{{.app.status.sync.status}}",
+  #           "short": true
+  #         },
+  #         {
+  #           "title": "Repository",
+  #           "value": "{{.app.spec.source.repoURL}}",
+  #           "short": true
+  #         },
+  #         {
+  #           "title": "Revision",
+  #           "value": "{{.app.status.sync.revision}}",
+  #           "short": true
+  #         }
+  #         {{range $index, $c := .app.status.conditions}}
+  #         {{if not $index}},{{end}}
+  #         {{if $index}},{{end}}
+  #         {
+  #           "title": "{{$c.type}}",
+  #           "value": "{{$c.message}}",
+  #           "short": true
+  #         }
+  #         {{end}}
+  #         ]
+  #       }]
+  # template.app-health-degraded: |
+  #   email:
+  #     subject: Application {{.app.metadata.name}} has degraded.
+  #     message: |
+  #       {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded.
+  #       Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
+ # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#f4c030", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-failed: | + # email: + # subject: Failed to sync application {{.app.metadata.name}}. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}} + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-running: | + # email: + # subject: Start syncing application {{.app.metadata.name}}. + # message: | + # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#0DADEA", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-status-unknown: | + # email: + # subject: Application {{.app.metadata.name}} sync status is 'Unknown' + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. 
+ # {{if ne .serviceType "slack"}} + # {{range $c := .app.status.conditions}} + # * {{$c.message}} + # {{end}} + # {{end}} + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-succeeded: | + # email: + # subject: Application {{.app.metadata.name}} has been successfully synced. + # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + + # -- The trigger defines the condition when the notification should be sent + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/triggers/ + triggers: {} + # trigger.on-deployed: | + # - description: Application is synced and healthy. Triggered once per commit. 
+ # oncePer: app.status.sync.revision + # send: + # - app-deployed + # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + # trigger.on-health-degraded: | + # - description: Application has degraded + # send: + # - app-health-degraded + # when: app.status.health.status == 'Degraded' + # trigger.on-sync-failed: | + # - description: Application syncing has failed + # send: + # - app-sync-failed + # when: app.status.operationState.phase in ['Error', 'Failed'] + # trigger.on-sync-running: | + # - description: Application is being synced + # send: + # - app-sync-running + # when: app.status.operationState.phase in ['Running'] + # trigger.on-sync-status-unknown: | + # - description: Application status is 'Unknown' + # send: + # - app-sync-status-unknown + # when: app.status.sync.status == 'Unknown' + # trigger.on-sync-succeeded: | + # - description: Application syncing has succeeded + # send: + # - app-sync-succeeded + # when: app.status.operationState.phase in ['Succeeded'] + # + # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/triggers/#default-triggers + # defaultTriggers: | + # - on-sync-status-unknown diff --git a/cluster/manifests/freeleaps-devops-system/jenkins/.gitkeep b/cluster/manifests/freeleaps-devops-system/jenkins/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-devops-system/namespace.yaml b/cluster/manifests/freeleaps-devops-system/namespace.yaml new file mode 100644 index 00000000..5e6e77fb --- /dev/null +++ b/cluster/manifests/freeleaps-devops-system/namespace.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: freeleaps-devops-system + labels: + name: freeleaps-devops-system + +--- + +apiVersion: v1 +kind: ResourceQuota +metadata: + name: freeleaps-devops-system + namespace: freeleaps-devops-system +spec: + hard: + requests.cpu: "4" diff --git a/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/.gitkeep b/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-storage-system/azure-blob-storage-csi/.gitkeep b/cluster/manifests/freeleaps-storage-system/azure-blob-storage-csi/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/freeleaps-storage-system/open-ebs/.gitkeep b/cluster/manifests/freeleaps-storage-system/open-ebs/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cluster/manifests/helm-repos/REPO.list b/cluster/manifests/helm-repos/REPO.list new file mode 100644 index 00000000..c18e37e2 --- /dev/null +++ b/cluster/manifests/helm-repos/REPO.list @@ -0,0 +1,12 @@ +UPDATED_AT:20250109(UTC+8) +LAST_UPDATED_BY:SUNZHENYU + +bitnami,https://charts.bitnami.com/bitnami,force-update +argo,https://argoproj.github.io/argo-helm,force-update +prometheus-community,https://prometheus-community.github.io/helm-charts,force-update +jet-stack,https://charts.jetstack.io,force-update +ingress-nginx,https://kubernetes.github.io/ingress-nginx,force-update +jenkins-ci,https://charts.jenkins.io,force-update +openebs,https://openebs.github.io/openebs,force-update +azure-blob-csi-driver,https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts,force-update +godaddy-webhook,https://snowdrop.github.io/godaddy-webhook,force-update \ No newline at end of file