added ebs hostpath storage
parent: e5fdf35669
commit: 3e70f70fa3

.gitignore (vendored, 3 lines changed)
@@ -1,6 +1,7 @@
 ansible/vault_password
 ansible/inventory/host_vars/*/vault.yaml
 ansible/roles/k8s_network/files/calico
-ansible/roles/k8s_storage/files/rook
+ansible/roles/k8s_storage_rook/files/rook
+ansible/roles/k8s_storage_ebs_manifests/files/ebs
 .vscode
 */vault.yaml
deleted file (91 lines): a Rook CephCluster manifest
@@ -1,91 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  annotations: null
  cephVersion:
    allowUnsupported: false
    image: quay.io/ceph/ceph:v17.2.3
  cleanupPolicy:
    allowUninstallWithVolumes: false
    confirmation: ''
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  crashCollector:
    disable: false
  dashboard:
    enabled: true
    ssl: true
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    machineDisruptionBudgetNamespace: openshift-machine-api
    manageMachineDisruptionBudgets: false
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
    startupProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  labels: null
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
    - enabled: true
      name: pg_autoscaler
  mon:
    allowMultiplePerNode: false
    count: 3
  monitoring:
    enabled: false
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources: null
  skipUpgradeChecks: false
  storage:
    config: null
    deviceFilter: ^sd[ab]
    onlyApplyOSDPlacement: false
    useAllDevices: false
    useAllNodes: true
  waitTimeoutForHealthyOSDInMinutes: 10
deleted file (298 lines): a commented Rook CephCluster example manifest
@@ -1,298 +0,0 @@
#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.

# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v16 is Pacific, and v17 is Quincy.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v17.2.3-20220805
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: quay.io/ceph/ceph:v17.2.3
    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
    # Future versions such as `reef` (v18) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook
  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
  skipUpgradeChecks: false
  # Whether or not continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
  # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10
  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false
  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
      # are already enabled by other settings in the cluster CR.
      - name: pg_autoscaler
        enabled: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # serve the dashboard using SSL
    ssl: true
  # enable prometheus alerting for cluster
  monitoring:
    # requires Prometheus to be pre-installed
    enabled: false
  network:
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
      compression:
        enabled: false
    # enable host networking
    #provider: host
    # enable the Multus network provider
    #provider: multus
    #selectors:
    # The selector keys are required to be `public` and `cluster`.
    # Based on the configuration, the operator will do the following:
    #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
    #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
    #
    # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
    #
    #public: public-conf --> NetworkAttachmentDefinition object name in Multus
    #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
    # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
    #ipFamily: "IPv6"
    # Ceph daemons to listen on both IPv4 and Ipv6 networks
    #dualStack: false
  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    #daysToRetain: 30
  # enable log collector, daemons will log on files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both case, re-install is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicate where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time then the zero source
      dataSource: zero
      # iteration overwrite N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #               - key: role
  #                 operator: In
  #                 values:
  #                   - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #       - key: storage-node
  #         operator: Exists
  # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  # collocation on the same node. This is a required rule when host network is used
  # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  # preferred rule with weight: 50.
  #   osd:
  #   prepareosd:
  #   mgr:
  #   cleanup:
  annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  # clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets.
  # And clusterMetadata annotations will not be merged with `all` annotations.
  #   clusterMetadata:
  #     kubed.appscode.com/sync: "true"
  # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:
  labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
  # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:
  #   crashcollector:
  resources:
  # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
  #   mgr:
  #     limits:
  #       cpu: "500m"
  #       memory: "1024Mi"
  #     requests:
  #       cpu: "500m"
  #       memory: "1024Mi"
  # The above example requests/limits can also be added to the other components
  #   mon:
  #   osd:
  # For OSD it also is a possible to specify requests/limits based on device class
  #   osd-hdd:
  #   osd-ssd:
  #   osd-nvme:
  #   prepareosd:
  #   mgr-sidecar:
  #   crashcollector:
  #   logcollector:
  #   cleanup:
  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false
  priorityClassNames:
    #all: rook-ceph-default-priority-class
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
    #crashcollector: rook-ceph-crashcollector-priority-class
  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    #deviceFilter:
    config:
      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
      # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
      # osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."
    # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
    onlyApplyOSDPlacement: false
  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
    # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0
    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
    # Only available on OpenShift.
    manageMachineDisruptionBudgets: false
    # Namespace in which to watch for the MachineDisruptionBudgets.
    machineDisruptionBudgetNamespace: openshift-machine-api

  # healthChecks
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
    # Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons.
    startupProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
(two further file diffs suppressed because they are too large)
deleted file (646 lines): the Rook operator manifest (ConfigMap + Deployment)
@@ -1,646 +0,0 @@
#################################################################################################################
# The deployment for the rook operator
# Contains the common settings for most Kubernetes deployments.
# For example, to create the rook-ceph cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster.yaml
#
# Also see other operator sample files for variations of operator.yaml:
# - operator-openshift.yaml: Common settings for running in OpenShift
###############################################################################################################

# Rook Ceph Operator Config ConfigMap
# Use this ConfigMap to override Rook-Ceph Operator configurations.
# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
#       Operator Deployment.
# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-ceph-operator-config
  # should be in the namespace of the operator
  namespace: rook-ceph # namespace:operator
data:
  # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
  ROOK_LOG_LEVEL: "INFO"

  # Enable the CSI driver.
  # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
  ROOK_CSI_ENABLE_CEPHFS: "true"
  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_RBD: "true"
  # Enable the CSI NFS driver. To start another version of the CSI driver, see image properties below.
  ROOK_CSI_ENABLE_NFS: "false"
  ROOK_CSI_ENABLE_GRPC_METRICS: "false"

  # Set to true to enable Ceph CSI pvc encryption support.
  CSI_ENABLE_ENCRYPTION: "false"

  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is significant drop in read/write performance.
  # CSI_ENABLE_HOST_NETWORK: "true"

  # Set to true to enable adding volume metadata on the CephFS subvolume and RBD images.
  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
  # Hence enable metadata is false by default.
  # CSI_ENABLE_METADATA: "true"

  # cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases
  # like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
  # CSI_CLUSTER_NAME: "my-prod-cluster"

  # Set logging level for cephCSI containers maintained by the cephCSI.
  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
  # CSI_LOG_LEVEL: "0"

  # Set logging level for Kubernetes-csi sidecar containers.
  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
  # CSI_SIDECAR_LOG_LEVEL: "0"

  # Set replicas for csi provisioner deployment.
  CSI_PROVISIONER_REPLICAS: "2"

  # OMAP generator will generate the omap mapping between the PV name and the RBD image.
  # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
  # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable
  # it set it to false.
  # CSI_ENABLE_OMAP_GENERATOR: "false"

  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"

  # set to false to disable deployment of snapshotter container in NFS provisioner pod.
  CSI_ENABLE_NFS_SNAPSHOTTER: "true"

  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
  CSI_ENABLE_RBD_SNAPSHOTTER: "true"

  # Enable cephfs kernel driver instead of ceph-fuse.
  # If you disable the kernel client, your application may be disrupted during upgrade.
  # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
  # NOTE! cephfs quota is not supported in kernel version < 4.17
  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"

  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_RBD_FSGROUPPOLICY: "File"

  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_CEPHFS_FSGROUPPOLICY: "File"

  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_NFS_FSGROUPPOLICY: "File"

  # (Optional) Allow starting unsupported ceph-csi image
  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"

  # (Optional) control the host mount of /etc/selinux for csi plugin pods.
  CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"

  # The default version of CSI supported by Rook will be started. To change the version
  # of the CSI driver to something other than what is officially supported, change
  # these images to the desired release of the CSI driver.
  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.7.2"
  # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1"
  # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.6.0"
  # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v3.3.0"
  # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0"
  # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.0.0"

  # To indicate the image pull policy to be applied to all the containers in the csi driver pods.
  # ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent"

  # (Optional) set user created priorityclassName for csi plugin pods.
  CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"

  # (Optional) set user created priorityclassName for csi provisioner pods.
  CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"

  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"

  # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  # CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"

  # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"

  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
  # ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"

  # (Optional) CephCSI CephFS plugin Volumes
  # CSI_CEPHFS_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix

  # (Optional) CephCSI CephFS plugin Volume mounts
  # CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true

  # (Optional) CephCSI RBD plugin Volumes
  # CSI_RBD_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix

  # (Optional) CephCSI RBD plugin Volume mounts
  # CSI_RBD_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true

  # (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
  # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_PROVISIONER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_PLUGIN_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists

  # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_RBD_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_RBD_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists

  # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
  # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
  # NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
  # valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists

  # (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
  # CSI_NFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
  # Put here list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_NFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists

  # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  #CSI_RBD_PROVISIONER_RESOURCE: |
  #  - name : csi-provisioner
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-resizer
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-attacher
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-snapshotter
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-rbdplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : csi-omap-generator
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  #CSI_RBD_PLUGIN_RESOURCE: |
  #  - name : driver-registrar
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  #  - name : csi-rbdplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  #CSI_CEPHFS_PROVISIONER_RESOURCE: |
  #  - name : csi-provisioner
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-resizer
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-attacher
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-snapshotter
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-cephfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  #CSI_CEPHFS_PLUGIN_RESOURCE: |
  #  - name : driver-registrar
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  #  - name : csi-cephfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  #  - name : liveness-prometheus
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m

  # (Optional) CEPH CSI NFS provisioner resource requirement list, Put here list of resource
  # requests and limits you want to apply for provisioner pod
  # CSI_NFS_PROVISIONER_RESOURCE: |
  #  - name : csi-provisioner
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 100m
  #      limits:
  #        memory: 256Mi
  #        cpu: 200m
  #  - name : csi-nfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m
  # (Optional) CEPH CSI NFS plugin resource requirement list, Put here list of resource
  # requests and limits you want to apply for plugin pod
  # CSI_NFS_PLUGIN_RESOURCE: |
  #  - name : driver-registrar
  #    resource:
  #      requests:
  #        memory: 128Mi
  #        cpu: 50m
  #      limits:
  #        memory: 256Mi
  #        cpu: 100m
  #  - name : csi-nfsplugin
  #    resource:
  #      requests:
  #        memory: 512Mi
  #        cpu: 250m
  #      limits:
  #        memory: 1Gi
  #        cpu: 500m

  # Configure CSI Ceph FS grpc and liveness metrics port
  # Set to true to enable Ceph CSI liveness container.
  CSI_ENABLE_LIVENESS: "false"
  # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
  # Configure CSI RBD grpc and liveness metrics port
  CSI_RBD_GRPC_METRICS_PORT: "9092"
  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
  # CSIADDONS_PORT: "9070"

  # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"

  # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
  # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
  ROOK_ENABLE_DISCOVERY_DAEMON: "false"
  # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15.
  ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
  # Enable the csi addons sidecar.
  CSI_ENABLE_CSIADDONS: "false"
  # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
  CSI_GRPC_TIMEOUT_SECONDS: "150"

  # Enable topology based provisioning.
  CSI_ENABLE_TOPOLOGY: "false"
  # Domain labels define which node labels to use as domains
  # for CSI nodeplugins to advertise their domains
  # NOTE: the value here serves as an example and needs to be
  # updated with node labels that define domains of interest
  # CSI_TOPOLOGY_DOMAIN_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph # namespace:operator
  labels:
    operator: rook
    storage-backend: ceph
    app.kubernetes.io/name: rook-ceph
    app.kubernetes.io/instance: rook-ceph
    app.kubernetes.io/component: rook-ceph-operator
    app.kubernetes.io/part-of: rook-ceph-operator
spec:
  selector:
    matchLabels:
      app: rook-ceph-operator
  strategy:
    type: Recreate
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      serviceAccountName: rook-ceph-system
      containers:
        - name: rook-ceph-operator
          image: rook/ceph:v1.10.4
          args: ["ceph", "operator"]
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
          volumeMounts:
            - mountPath: /var/lib/rook
              name: rook-config
            - mountPath: /etc/ceph
              name: default-config-dir
            - mountPath: /etc/webhook
              name: webhook-cert
          ports:
            - containerPort: 9443
              name: https-webhook
              protocol: TCP
          env:
            # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
            # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
            - name: ROOK_CURRENT_NAMESPACE_ONLY
              value: "false"
            # Rook Discover toleration. Will tolerate all taints with all keys.
            # Choose between NoSchedule, PreferNoSchedule and NoExecute:
            # - name: DISCOVER_TOLERATION
            #   value: "NoSchedule"
            # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
            # - name: DISCOVER_TOLERATION_KEY
            #   value: "<KeyOfTheTaintToTolerate>"
            # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
            # - name: DISCOVER_TOLERATIONS
            #   value: |
            #     - effect: NoSchedule
            #       key: node-role.kubernetes.io/control-plane
            #       operator: Exists
            #     - effect: NoExecute
            #       key: node-role.kubernetes.io/etcd
            #       operator: Exists
            # (Optional) Rook Discover priority class name to set on the pod(s)
            # - name: DISCOVER_PRIORITY_CLASS_NAME
            #   value: "<PriorityClassName>"
            # (Optional) Discover Agent NodeAffinity.
            # - name: DISCOVER_AGENT_NODE_AFFINITY
            #   value: "role=storage-node; storage=rook, ceph"
            # (Optional) Discover Agent Pod Labels.
            # - name: DISCOVER_AGENT_POD_LABELS
            #   value: "key1=value1,key2=value2"

            # The duration between discovering devices in the rook-discover daemonset.
            - name: ROOK_DISCOVER_DEVICES_INTERVAL
              value: "60m"

            # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
            # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues.
            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
              value: "false"

            # Disable automatic orchestration when new devices are discovered
            - name: ROOK_DISABLE_DEVICE_HOTPLUG
              value: "false"

            # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+".
            # In case of more than one regex, use comma to separate between them.
            # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
            # Add regex expression after putting a comma to blacklist a disk
            # If value is empty, the default regex will be used.
            - name: DISCOVER_DAEMON_UDEV_BLACKLIST
              value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"

            # Time to wait until the node controller will move Rook pods to other
            # nodes after detecting an unreachable node.
            # Pods affected by this setting are:
            # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
            # The value used in this variable replaces the default value of 300 secs
            # added automatically by k8s as Toleration for
            # <node.kubernetes.io/unreachable>
            # The total amount of time to reschedule Rook pods in healthy nodes
            # before detecting a <not ready node> condition will be the sum of:
            #  --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
            #  --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
            - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
              value: "5"

            - name: ROOK_DISABLE_ADMISSION_CONTROLLER
              value: "false"

            # The name of the node to pass with the downward API
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The pod name to pass with the downward API
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # The pod namespace to pass with the downward API
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          # Recommended resource requests and limits, if desired
          #resources:
          #  limits:
          #    cpu: 500m
          #    memory: 512Mi
          #  requests:
          #    cpu: 100m
          #    memory: 128Mi

          # Uncomment it to run lib bucket provisioner in multithreaded mode
          #- name: LIB_BUCKET_PROVISIONER_THREADS
          #  value: "5"

      # Uncomment it to run rook operator on the host network
      #hostNetwork: true
      volumes:
        - name: rook-config
          emptyDir: {}
        - name: default-config-dir
          emptyDir: {}
        - name: webhook-cert
          emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT
ansible/roles/k8s_storage_ebs_manifests/tasks/main.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
- name: create target directory for open-ebs files
  become: false
  ansible.builtin.file:
    path: "{{ ansible_search_path[0] }}/files/ebs"
    state: directory
    mode: 0775

- name: download the open-ebs operator (lite) manifest
  become: false
  ansible.builtin.uri:
    url: "https://openebs.github.io/charts/{{ item }}"
    dest: "{{ ansible_search_path[0] }}/files/ebs/{{ item }}"
    creates: "{{ ansible_search_path[0] }}/files/ebs/{{ item }}"
    mode: 0664
  with_items:
    - openebs-operator-lite.yaml

- name: template out the open-ebs storage class definitions
  ansible.builtin.template:
    src: ebs_storage_class.yaml.j2
    dest: "{{ ansible_search_path[0] }}/files/ebs/ebs_storage_class_{{ item }}.yaml"
  with_items:
    - ssd
    - hdd

- name: install the open-ebs operator (lite)
  kubernetes.core.k8s:
    src: "{{ ansible_search_path[0] }}/files/ebs/openebs-operator-lite.yaml"
    state: present

- name: install the open-ebs storage classes
  kubernetes.core.k8s:
    src: "{{ ansible_search_path[0] }}/files/ebs/ebs_storage_class_{{ item }}.yaml"
    state: present
  with_items:
    - ssd
    - hdd
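The k8s tasks above apply the operator manifest and immediately move on to the storage classes; nothing waits for the provisioner to come up. A possible follow-up task, sketched under the assumption that the lite operator ships its provisioner as an openebs-localpv-provisioner Deployment in the openebs namespace (verify both names against the downloaded manifest):

- name: wait for the open-ebs localpv provisioner to become ready
  kubernetes.core.k8s_info:
    kind: Deployment
    name: openebs-localpv-provisioner   # assumed name, check the manifest
    namespace: openebs                  # assumed namespace, check the manifest
  register: ebs_provisioner
  until: >-
    ebs_provisioner.resources | length > 0 and
    (ebs_provisioner.resources[0].status.readyReplicas | default(0)) > 0
  retries: 30
  delay: 10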
ebs_storage_class.yaml.j2 (new file, 15 lines; the template referenced by the tasks above)
@@ -0,0 +1,15 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
provisioner: openebs.io/local
metadata:
  name: {{ item }}
  annotations:
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/ebs/{{ item }}/"
volumeBindingMode: Immediate
allowVolumeExpansion: true
reclaimPolicy: Retain
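Rendered with item=ssd, this template yields a StorageClass named ssd backed by /ebs/ssd/ on the node. A throwaway claim to exercise it could look like this (an illustrative sketch; the claim name and size are placeholders):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ebs-ssd-test   # placeholder name
spec:
  storageClassName: ssd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi

Note that reclaimPolicy: Retain means released PVs and their hostpath directories are kept and must be cleaned up by hand.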
ansible/roles/k8s_storage_ebs_nodes/tasks/main.yaml (new file, 60 lines)
@@ -0,0 +1,60 @@
---
# - name: create mount directories for ebs drives
#   ansible.builtin.file:
#     path: "{{ item }}"
#     state: directory
#     owner: root
#     group: root
#     mode: 0775
#   with_items:
#     - /ebs
#     - /ebs/ssd
#     - /ebs/hdd

- name: ensure parted is installed
  community.general.pacman:
    name: parted
    state: latest
    update_cache: true
  when:
    - ansible_os_family == 'Archlinux'

- name: create ebs partitions
  community.general.parted:
    device: "{{ item.disk }}"
    align: optimal
    name: "{{ item.part }}"
    label: gpt
    number: 1
    part_start: 0%
    part_end: 100%
    state: present
    fs_type: ext4
  with_items:
    - disk: /dev/vdc
      part: ebs-ssd
    - disk: /dev/vdd
      part: ebs-hdd

- name: create ebs partition filesystems
  community.general.filesystem:
    dev: "{{ item }}"
    fstype: ext4
    resizefs: true
    state: present
  with_items:
    - /dev/disk/by-partlabel/ebs-ssd
    - /dev/disk/by-partlabel/ebs-hdd

- name: mount ebs disks
  ansible.posix.mount:
    state: mounted
    src: "{{ item.src }}"
    path: "{{ item.path }}"
    fstype: ext4
    boot: true
  with_items:
    - src: /dev/disk/by-partlabel/ebs-ssd
      path: /ebs/ssd
    - src: /dev/disk/by-partlabel/ebs-hdd
      path: /ebs/hdd
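The device paths (/dev/vdc, /dev/vdd) and partition labels are hardcoded for the current worker layout. If node hardware ever diverges, the partition, filesystem, and mount loops above could iterate over an inventory variable instead; a sketch, where ebs_disks is a hypothetical host_vars entry:

# hypothetical ansible/inventory/host_vars/<worker>/vars.yaml
ebs_disks:
  - disk: /dev/vdc
    part: ebs-ssd
    path: /ebs/ssd
  - disk: /dev/vdd
    part: ebs-hdd
    path: /ebs/hdd

The three tasks would then share with_items: "{{ ebs_disks }}" rather than repeating the literals.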
@@ -1,7 +1,14 @@
 ---
-- name: scratch testing
+# - name: nodes
+#   hosts: k8s_worker
+#   gather_facts: true
+#   become: true
+#   roles:
+#     - k8s_storage_ebs_nodes
+
+- name: manifests
   hosts: localhost
-  gather_facts: true
+  gather_facts: false
   become: false
   roles:
-    - k8s_storage_deploy
+    - k8s_storage_ebs_manifests