
reorganize storage roles

michael 2022-12-05 23:18:04 +13:00
parent 3e70f70fa3
commit f230db3739
21 changed files with 17515 additions and 1 deletion


@@ -0,0 +1,7 @@
---
- name: destroy rook storage
  hosts: k8s_worker
  gather_facts: true
  become: true
  roles:
    - k8s_ebs_destroy
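For reference, a minimal inventory sketch (YAML format) that the hosts: k8s_worker pattern above would target; the group name comes from the play, but the host names are placeholders and not part of this commit:

k8s_worker:
  hosts:
    worker-01:
    worker-02: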


@@ -4,4 +4,4 @@
   gather_facts: true
   become: true
   roles:
-    - k8s_storage_destroy
+    - k8s_rook_destroy


@@ -0,0 +1,30 @@
---
- name: reset the nodes
  ansible.builtin.shell:
    cmd: |
      kubeadm reset -f
- name: delete orphaned files
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /etc/cni
    - /etc/kubernetes
    - /var/lib/dockershim
    - /var/lib/etcd
    - /var/lib/kubelet
    - /var/run/kubernetes
- name: purge iptables
  ansible.builtin.shell:
    cmd: |
      iptables -F && iptables -X
      iptables -t nat -F && iptables -t nat -X
      iptables -t raw -F && iptables -t raw -X
      iptables -t mangle -F && iptables -t mangle -X
- name: restart the containerd service
  ansible.builtin.service:
    name: containerd
    state: restarted
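As a hedged sketch only: tasks like these are typically wired in through a role from a play similar to the destroy plays in this commit. The play and role names below (reset worker nodes, k8s_node_reset) are illustrative assumptions, not names taken from this change set:

---
- name: reset worker nodes
  hosts: k8s_worker
  gather_facts: true
  become: true
  roles:
    - k8s_node_reset  # hypothetical role wrapping the reset tasks above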


@@ -0,0 +1,15 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
provisioner: openebs.io/local
metadata:
  name: hdd
  annotations:
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/ebs/hdd/"
volumeBindingMode: Immediate
allowVolumeExpansion: true
reclaimPolicy: Retain


@@ -0,0 +1,15 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
provisioner: openebs.io/local
metadata:
  name: ssd
  annotations:
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/ebs/ssd/"
volumeBindingMode: Immediate
allowVolumeExpansion: true
reclaimPolicy: Retain
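The two local-PV classes above differ only in their name and BasePath. A minimal PersistentVolumeClaim sketch that would request storage from one of them (ssd shown; the claim name and size are illustrative and not part of this commit):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-ssd-claim  # illustrative name
spec:
  storageClassName: ssd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi  # illustrative size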


@@ -0,0 +1,934 @@
# This manifest deploys the OpenEBS control plane components, with associated CRs & RBAC rules
# NOTE: On GKE, deploy the openebs-operator.yaml in admin context
# Create the OpenEBS namespace
apiVersion: v1
kind: Namespace
metadata:
name: openebs
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: openebs-maya-operator
namespace: openebs
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
rules:
- apiGroups: ["*"]
resources: ["nodes", "nodes/proxy"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["statefulsets", "daemonsets"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["resourcequotas", "limitranges"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "poddisruptionbudgets", "certificatesigningrequests"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["openebs.io"]
resources: [ "*"]
verbs: ["*"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
subjects:
- kind: ServiceAccount
name: openebs-maya-operator
namespace: openebs
roleRef:
kind: ClusterRole
name: openebs-maya-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: blockdevices.openebs.io
spec:
group: openebs.io
names:
kind: BlockDevice
listKind: BlockDeviceList
plural: blockdevices
shortNames:
- bd
singular: blockdevice
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.nodeAttributes.nodeName
name: NodeName
type: string
- jsonPath: .spec.path
name: Path
priority: 1
type: string
- jsonPath: .spec.filesystem.fsType
name: FSType
priority: 1
type: string
- jsonPath: .spec.capacity.storage
name: Size
type: string
- jsonPath: .status.claimState
name: ClaimState
type: string
- jsonPath: .status.state
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: BlockDevice is the Schema for the blockdevices API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DeviceSpec defines the properties and runtime status of a BlockDevice
properties:
aggregateDevice:
description: AggregateDevice was intended to store the hierarchical information in cases of LVM. However this is currently not implemented and may need to be re-looked into for better design. To be deprecated
type: string
capacity:
description: Capacity
properties:
logicalSectorSize:
description: LogicalSectorSize is blockdevice logical-sector size in bytes
format: int32
type: integer
physicalSectorSize:
description: PhysicalSectorSize is blockdevice physical-Sector size in bytes
format: int32
type: integer
storage:
description: Storage is the blockdevice capacity in bytes
format: int64
type: integer
required:
- storage
type: object
claimRef:
description: ClaimRef is the reference to the BDC which has claimed this BD
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
details:
description: Details contain static attributes of BD like model,serial, and so forth
properties:
compliance:
description: Compliance is standards/specifications version implemented by device firmware such as SPC-1, SPC-2, etc
type: string
deviceType:
description: DeviceType represents the type of device like sparse, disk, partition, lvm, crypt
enum:
- disk
- partition
- sparse
- loop
- lvm
- crypt
- dm
- mpath
type: string
driveType:
description: DriveType is the type of backing drive, HDD/SSD
enum:
- HDD
- SSD
- Unknown
- ""
type: string
firmwareRevision:
description: FirmwareRevision is the disk firmware revision
type: string
hardwareSectorSize:
description: HardwareSectorSize is the hardware sector size in bytes
format: int32
type: integer
logicalBlockSize:
description: LogicalBlockSize is the logical block size in bytes reported by /sys/class/block/sda/queue/logical_block_size
format: int32
type: integer
model:
description: Model is model of disk
type: string
physicalBlockSize:
description: PhysicalBlockSize is the physical block size in bytes reported by /sys/class/block/sda/queue/physical_block_size
format: int32
type: integer
serial:
description: Serial is serial number of disk
type: string
vendor:
description: Vendor is vendor of disk
type: string
type: object
devlinks:
description: DevLinks contains soft links of a block device like /dev/by-id/... /dev/by-uuid/...
items:
description: DeviceDevLink holds the mapping between type and links like by-id type or by-path type link
properties:
kind:
description: Kind is the type of link like by-id or by-path.
enum:
- by-id
- by-path
type: string
links:
description: Links are the soft links
items:
type: string
type: array
type: object
type: array
filesystem:
description: FileSystem contains mountpoint and filesystem type
properties:
fsType:
description: Type represents the FileSystem type of the block device
type: string
mountPoint:
description: MountPoint represents the mountpoint of the block device.
type: string
type: object
nodeAttributes:
description: NodeAttributes has the details of the node on which BD is attached
properties:
nodeName:
description: NodeName is the name of the Kubernetes node resource on which the device is attached
type: string
type: object
parentDevice:
description: "ParentDevice was intended to store the UUID of the parent Block Device as is the case for partitioned block devices. \n For example: /dev/sda is the parent for /dev/sda1 To be deprecated"
type: string
partitioned:
description: Partitioned represents if BlockDevice has partitions or not (Yes/No) Currently always default to No. To be deprecated
enum:
- "Yes"
- "No"
type: string
path:
description: Path contain devpath (e.g. /dev/sdb)
type: string
required:
- capacity
- devlinks
- nodeAttributes
- path
type: object
status:
description: DeviceStatus defines the observed state of BlockDevice
properties:
claimState:
description: ClaimState represents the claim state of the block device
enum:
- Claimed
- Unclaimed
- Released
type: string
state:
description: State is the current state of the blockdevice (Active/Inactive/Unknown)
enum:
- Active
- Inactive
- Unknown
type: string
required:
- claimState
- state
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: blockdeviceclaims.openebs.io
spec:
group: openebs.io
names:
kind: BlockDeviceClaim
listKind: BlockDeviceClaimList
plural: blockdeviceclaims
shortNames:
- bdc
singular: blockdeviceclaim
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.blockDeviceName
name: BlockDeviceName
type: string
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: BlockDeviceClaim is the Schema for the blockdeviceclaims API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DeviceClaimSpec defines the request details for a BlockDevice
properties:
blockDeviceName:
description: BlockDeviceName is the reference to the block-device backing this claim
type: string
blockDeviceNodeAttributes:
description: BlockDeviceNodeAttributes is the attributes on the node from which a BD should be selected for this claim. It can include nodename, failure domain etc.
properties:
hostName:
description: HostName represents the hostname of the Kubernetes node resource where the BD should be present
type: string
nodeName:
description: NodeName represents the name of the Kubernetes node resource where the BD should be present
type: string
type: object
deviceClaimDetails:
description: Details of the device to be claimed
properties:
allowPartition:
description: AllowPartition represents whether to claim a full block device or a device that is a partition
type: boolean
blockVolumeMode:
description: 'BlockVolumeMode represents whether to claim a device in Block mode or Filesystem mode. These are use cases of BlockVolumeMode: 1) Not specified: VolumeMode check will not be effective 2) VolumeModeBlock: BD should not have any filesystem or mountpoint 3) VolumeModeFileSystem: BD should have a filesystem and mountpoint. If DeviceFormat is specified then the format should match with the FSType in BD'
type: string
formatType:
description: Format of the device required, eg:ext4, xfs
type: string
type: object
deviceType:
description: DeviceType represents the type of drive like SSD, HDD etc.,
nullable: true
type: string
hostName:
description: Node name from where blockdevice has to be claimed. To be deprecated. Use NodeAttributes.HostName instead
type: string
resources:
description: Resources will help with placing claims on Capacity, IOPS
properties:
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum resources required. eg: if storage resource of 10G is requested minimum capacity of 10G should be available TODO for validating'
type: object
required:
- requests
type: object
selector:
description: Selector is used to find block devices to be considered for claiming
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: object
status:
description: DeviceClaimStatus defines the observed state of BlockDeviceClaim
properties:
phase:
description: Phase represents the current phase of the claim
type: string
required:
- phase
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
name: openebs-ndm-config
namespace: openebs
labels:
openebs.io/component-name: ndm-config
data:
# udev-probe is the default or primary probe; it should be enabled to run ndm
# filterconfigs contains configs of filters. To provide a group of include
# and exclude values, add them as a comma-separated string
node-disk-manager.config: |
probeconfigs:
- key: udev-probe
name: udev probe
state: true
- key: seachest-probe
name: seachest probe
state: false
- key: smart-probe
name: smart probe
state: true
filterconfigs:
- key: os-disk-exclude-filter
name: os disk exclude filter
state: true
exclude: "/,/etc/hosts,/boot"
- key: vendor-filter
name: vendor filter
state: true
include: ""
exclude: "CLOUDBYT,OpenEBS"
- key: path-filter
name: path filter
state: true
include: ""
exclude: "/dev/loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/md,/dev/dm-,/dev/rbd,/dev/zd"
# metaconfigs can be used to decorate the block device with different types of labels
# that are available on the node or come in as device properties.
# node labels - the node where the bd is discovered. A whitelisted set of label prefixes
# attribute labels - a property of the BD can be added as an ndm label as ndm.io/<property>=<property-value>
metaconfigs:
- key: node-labels
name: node labels
pattern: ""
- key: device-labels
name: device labels
type: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: openebs-ndm
namespace: openebs
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 3.3.0
spec:
selector:
matchLabels:
name: openebs-ndm
openebs.io/component-name: ndm
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 3.3.0
spec:
# By default the node-disk-manager will be run on all kubernetes nodes
# If you would like to limit this to only some nodes, say the nodes
# that have storage attached, you could label those node and use
# nodeSelector.
#
# e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
# kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
#nodeSelector:
# "openebs.io/nodegroup": "storage-node"
serviceAccountName: openebs-maya-operator
hostNetwork: true
# host PID is used to check status of iSCSI Service when the NDM
# API service is enabled
#hostPID: true
containers:
- name: node-disk-manager
image: openebs/node-disk-manager:2.0.0
args:
- -v=4
# The feature-gate is used to enable the new UUID algorithm.
- --feature-gates="GPTBasedUUID"
# Use the partition table UUID instead of creating a single partition to get
# the partition UUID. Requires `GPTBasedUUID` to be enabled as well.
# - --feature-gates="PartitionTableUUID"
# Detect changes to device size, filesystem and mount-points without restart.
# - --feature-gates="ChangeDetection"
# The feature gate is used to start the gRPC API service. The gRPC server
# starts at 9115 port by default. This feature is currently in Alpha state
# - --feature-gates="APIService"
# The feature gate is used to enable NDM, to create blockdevice resources
# for unused partitions on the OS disk
# - --feature-gates="UseOSDisk"
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /host/node-disk-manager.config
subPath: node-disk-manager.config
readOnly: true
# make udev database available inside container
- name: udev
mountPath: /run/udev
- name: procmount
mountPath: /host/proc
readOnly: true
- name: devmount
mountPath: /dev
- name: basepath
mountPath: /var/openebs/ndm
- name: sparsepath
mountPath: /var/openebs/sparse
env:
# namespace in which NDM is installed will be passed to NDM Daemonset
# as environment variable
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# pass hostname as env variable using downward API to the NDM container
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# specify the directory where the sparse files need to be created.
# if not specified, then sparse files will not be created.
- name: SPARSE_FILE_DIR
value: "/var/openebs/sparse"
# Size(bytes) of the sparse file to be created.
- name: SPARSE_FILE_SIZE
value: "10737418240"
# Specify the number of sparse files to be created
- name: SPARSE_FILE_COUNT
value: "0"
livenessProbe:
exec:
command:
- pgrep
- "ndm"
initialDelaySeconds: 30
periodSeconds: 60
volumes:
- name: config
configMap:
name: openebs-ndm-config
- name: udev
hostPath:
path: /run/udev
type: Directory
# mount /proc (to access mount file of process 1 of host) inside container
# to read mount-point of disks and partitions
- name: procmount
hostPath:
path: /proc
type: Directory
- name: devmount
# the /dev directory is mounted so that we have access to the devices that
# are connected at runtime of the pod.
hostPath:
path: /dev
type: Directory
- name: basepath
hostPath:
path: /var/openebs/ndm
type: DirectoryOrCreate
- name: sparsepath
hostPath:
path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-ndm-operator
namespace: openebs
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 3.3.0
spec:
selector:
matchLabels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 3.3.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: node-disk-operator
image: openebs/node-disk-operator:2.0.0
imagePullPolicy: IfNotPresent
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# the service account of the ndm-operator pod
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPERATOR_NAME
value: "node-disk-operator"
- name: CLEANUP_JOB_IMAGE
value: "openebs/linux-utils:3.3.0"
# OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
# to the cleanup pod launched by NDM operator
#- name: OPENEBS_IO_IMAGE_PULL_SECRETS
# value: ""
livenessProbe:
httpGet:
path: /healthz
port: 8585
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8585
initialDelaySeconds: 5
periodSeconds: 10
---
# Create NDM cluster exporter deployment.
# This is an optional component and is not required for the basic
# functioning of NDM
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-ndm-cluster-exporter
namespace: openebs
labels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
openebs.io/version: 3.3.0
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
template:
metadata:
labels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
openebs.io/version: 3.3.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: ndm-cluster-exporter
image: openebs/node-disk-exporter:2.0.0
command:
- /usr/local/bin/exporter
args:
- "start"
- "--mode=cluster"
- "--port=$(METRICS_LISTEN_PORT)"
- "--metrics=/metrics"
ports:
- containerPort: 9100
protocol: TCP
name: metrics
imagePullPolicy: IfNotPresent
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METRICS_LISTEN_PORT
value: :9100
---
# Create NDM cluster exporter service
# This is optional and required only when
# ndm-cluster-exporter deployment is used
apiVersion: v1
kind: Service
metadata:
name: openebs-ndm-cluster-exporter-service
namespace: openebs
labels:
name: openebs-ndm-cluster-exporter-service
openebs.io/component-name: ndm-cluster-exporter
app: openebs-ndm-exporter
spec:
clusterIP: None
ports:
- name: metrics
port: 9100
targetPort: 9100
selector:
name: openebs-ndm-cluster-exporter
---
# Create NDM node exporter daemonset.
# This is an optional component used for getting disk level
# metrics from each of the storage nodes
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: openebs-ndm-node-exporter
namespace: openebs
labels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
openebs.io/version: 3.3.0
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
template:
metadata:
labels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
openebs.io/version: 3.3.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: node-disk-exporter
image: openebs/node-disk-exporter:2.0.0
command:
- /usr/local/bin/exporter
args:
- "start"
- "--mode=node"
- "--port=$(METRICS_LISTEN_PORT)"
- "--metrics=/metrics"
ports:
- containerPort: 9101
protocol: TCP
name: metrics
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METRICS_LISTEN_PORT
value: :9101
---
# Create NDM node exporter service
# This is optional and required only when
# ndm-node-exporter daemonset is used
apiVersion: v1
kind: Service
metadata:
name: openebs-ndm-node-exporter-service
namespace: openebs
labels:
name: openebs-ndm-node-exporter
openebs.io/component: openebs-ndm-node-exporter
app: openebs-ndm-exporter
spec:
clusterIP: None
ports:
- name: metrics
port: 9101
targetPort: 9101
selector:
name: openebs-ndm-node-exporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-localpv-provisioner
namespace: openebs
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.3.0
spec:
selector:
matchLabels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.3.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: IfNotPresent
image: openebs/provisioner-localpv:3.3.0
args:
- "--bd-time-out=$(BDC_BD_BIND_RETRIES)"
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# This sets the number of times the provisioner should try
# with a polling interval of 5 seconds, to get the Blockdevice
# Name from a BlockDeviceClaim, before the BlockDeviceClaim
# is deleted. E.g. 12 * 5 seconds = 60 seconds timeout
- name: BDC_BD_BIND_RETRIES
value: "12"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator-lite"
- name: OPENEBS_IO_HELPER_IMAGE
value: "openebs/linux-utils:3.3.0"
- name: OPENEBS_IO_BASE_PATH
value: "/var/openebs/local"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
#- name: LEADER_ELECTION_ENABLED
# value: "true"
# OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
# to the helper pod launched by local-pv hostpath provisioner
#- name: OPENEBS_IO_IMAGE_PULL_SECRETS
# value: ""
# Process name used for matching is limited to the 15 characters
# present in the pgrep output.
# So the full name can't be used here with pgrep (>15 chars). A regular expression
# that matches the entire command name has to be specified.
# Anchor `^` : matches any string that starts with `provisioner-loc`
# `.*`: matches any string that has `provisioner-loc` followed by zero or more characters
livenessProbe:
exec:
command:
- sh
- -c
- test `pgrep -c "^provisioner-loc.*"` = 1
initialDelaySeconds: 30
periodSeconds: 60
---
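As a usage sketch against the BlockDeviceClaim CRD installed above: a claim asking NDM for a block device with at least 100Gi of capacity. The claim name and capacity are illustrative assumptions, not values taken from this commit:

apiVersion: openebs.io/v1alpha1
kind: BlockDeviceClaim
metadata:
  name: example-bdc  # illustrative name
  namespace: openebs
spec:
  resources:
    requests:
      storage: 100Gi  # illustrative capacity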


@@ -0,0 +1 @@
---


@@ -0,0 +1,91 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
annotations: null
cephVersion:
allowUnsupported: false
image: quay.io/ceph/ceph:v17.2.3
cleanupPolicy:
allowUninstallWithVolumes: false
confirmation: ''
sanitizeDisks:
dataSource: zero
iteration: 1
method: quick
continueUpgradeAfterChecksEvenIfNotHealthy: false
crashCollector:
disable: false
dashboard:
enabled: true
ssl: true
dataDirHostPath: /var/lib/rook
disruptionManagement:
machineDisruptionBudgetNamespace: openshift-machine-api
manageMachineDisruptionBudgets: false
managePodBudgets: true
osdMaintenanceTimeout: 30
pgHealthCheckTimeout: 0
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
livenessProbe:
mgr:
disabled: false
mon:
disabled: false
osd:
disabled: false
startupProbe:
mgr:
disabled: false
mon:
disabled: false
osd:
disabled: false
labels: null
logCollector:
enabled: true
maxLogSize: 500M
periodicity: daily
mgr:
allowMultiplePerNode: false
count: 2
modules:
- enabled: true
name: pg_autoscaler
mon:
allowMultiplePerNode: false
count: 3
monitoring:
enabled: false
network:
connections:
compression:
enabled: false
encryption:
enabled: false
priorityClassNames:
mgr: system-cluster-critical
mon: system-node-critical
osd: system-node-critical
removeOSDsIfOutAndSafeToRemove: false
resources: null
skipUpgradeChecks: false
storage:
config: null
deviceFilter: ^sd[ab]
onlyApplyOSDPlacement: false
useAllDevices: false
useAllNodes: true
waitTimeoutForHealthyOSDInMinutes: 10
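Not part of this commit, but as a hedged illustration of how a CephCluster like the one above is typically consumed: a replicated CephBlockPool plus an RBD StorageClass modelled on the standard Rook examples. The pool name, class name, replica count, and parameters are assumptions here:

apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool  # illustrative name
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 3  # illustrative replica count
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block  # illustrative name
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph
  pool: replicapool
  imageFormat: "2"
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete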


@@ -0,0 +1,298 @@
#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.
# For example, to create the cluster:
# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
# kubectl create -f cluster.yaml
#################################################################################################################
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph # namespace:cluster
spec:
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v16 is Pacific, and v17 is Quincy.
# RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v17.2.3-20220805
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
image: quay.io/ceph/ceph:v17.2.3
# Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
# Future versions such as `reef` (v18) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not upgrade should continue even if a check fails
# This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
# Use at your OWN risk
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
skipUpgradeChecks: false
# Whether or not continue if PGs are not clean during an upgrade
continueUpgradeAfterChecksEvenIfNotHealthy: false
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
# If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
# continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
# The default wait timeout is 10 minutes.
waitTimeoutForHealthyOSDInMinutes: 10
mon:
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: 3
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: 2
allowMultiplePerNode: false
modules:
# Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
# are already enabled by other settings in the cluster CR.
- name: pg_autoscaler
enabled: true
# enable the ceph dashboard for viewing cluster status
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
# urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# serve the dashboard using SSL
ssl: true
# enable prometheus alerting for cluster
monitoring:
# requires Prometheus to be pre-installed
enabled: false
network:
connections:
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
# When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
# IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
# you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
# Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
compression:
enabled: false
# enable host networking
#provider: host
# enable the Multus network provider
#provider: multus
#selectors:
# The selector keys are required to be `public` and `cluster`.
# Based on the configuration, the operator will do the following:
# 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
# 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
#
# In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
#
#public: public-conf --> NetworkAttachmentDefinition object name in Multus
#cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
# Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
#ipFamily: "IPv6"
# Ceph daemons to listen on both IPv4 and Ipv6 networks
#dualStack: false
# enable the crash collector for ceph daemon crash collection
crashCollector:
disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
#daysToRetain: 30
# enable log collector, daemons will log on files and rotate
logCollector:
enabled: true
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
# Rook will immediately stop configuring the cluster and only wait for the delete command.
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
confirmation: ""
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
sanitizeDisks:
# method indicates if the entire disk should be sanitized or simply ceph's metadata
# in both cases, re-install is possible
# possible choices are 'complete' or 'quick' (default)
method: quick
# dataSource indicates where to get random bytes from to write on the disk
# possible choices are 'zero' (default) or 'random'
# using random sources will consume entropy from the system and will take much more time than the zero source
dataSource: zero
# iteration overwrites N times instead of the default (1)
# takes an integer value
iteration: 1
# allowUninstallWithVolumes defines how the uninstall should be performed
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
allowUninstallWithVolumes: false
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
# tolerate taints with a key of 'storage-node'.
# placement:
# all:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - storage-node
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
# tolerations:
# - key: storage-node
# operator: Exists
# The above placement information can also be specified for mon, osd, and mgr components
# mon:
# Monitor deployments may contain an anti-affinity rule for avoiding monitor
# collocation on the same node. This is a required rule when host network is used
# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# preferred rule with weight: 50.
# osd:
# prepareosd:
# mgr:
# cleanup:
annotations:
# all:
# mon:
# osd:
# cleanup:
# prepareosd:
# clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets.
# And clusterMetadata annotations will not be merged with `all` annotations.
# clusterMetadata:
# kubed.appscode.com/sync: "true"
# If no mgr annotations are set, prometheus scrape annotations will be set by default.
# mgr:
labels:
# all:
# mon:
# osd:
# cleanup:
# mgr:
# prepareosd:
# monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
# These labels can be passed as LabelSelector to Prometheus
# monitoring:
# crashcollector:
resources:
# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
# mgr:
# limits:
# cpu: "500m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"
# The above example requests/limits can also be added to the other components
# mon:
# osd:
# For OSD it also is a possible to specify requests/limits based on device class
# osd-hdd:
# osd-ssd:
# osd-nvme:
# prepareosd:
# mgr-sidecar:
# crashcollector:
# logcollector:
# cleanup:
# The option to automatically remove OSDs that are out and are safe to destroy.
removeOSDsIfOutAndSafeToRemove: false
priorityClassNames:
#all: rook-ceph-default-priority-class
mon: system-node-critical
osd: system-node-critical
mgr: system-cluster-critical
#crashcollector: rook-ceph-crashcollector-priority-class
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: true
#deviceFilter:
config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
onlyApplyOSDPlacement: false
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
# The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
# No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
pgHealthCheckTimeout: 0
# If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
# Only available on OpenShift.
manageMachineDisruptionBudgets: false
# Namespace in which to watch for the MachineDisruptionBudgets.
machineDisruptionBudgetNamespace: openshift-machine-api
# healthChecks
# Valid values for daemons are 'mon', 'osd', 'status'
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
# Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons.
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
# Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons.
startupProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,646 @@
#################################################################################################################
# The deployment for the rook operator
# Contains the common settings for most Kubernetes deployments.
# For example, to create the rook-ceph cluster:
# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
# kubectl create -f cluster.yaml
#
# Also see other operator sample files for variations of operator.yaml:
# - operator-openshift.yaml: Common settings for running in OpenShift
###############################################################################################################
# Rook Ceph Operator Config ConfigMap
# Use this ConfigMap to override Rook-Ceph Operator configurations.
# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
# Operator Deployment.
# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
kind: ConfigMap
apiVersion: v1
metadata:
name: rook-ceph-operator-config
# should be in the namespace of the operator
namespace: rook-ceph # namespace:operator
data:
# The logging level for the operator: ERROR | WARNING | INFO | DEBUG
ROOK_LOG_LEVEL: "INFO"
# Enable the CSI driver.
# To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
ROOK_CSI_ENABLE_CEPHFS: "true"
# Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
ROOK_CSI_ENABLE_RBD: "true"
# Enable the CSI NFS driver. To start another version of the CSI driver, see image properties below.
ROOK_CSI_ENABLE_NFS: "false"
ROOK_CSI_ENABLE_GRPC_METRICS: "false"
# Set to true to enable Ceph CSI pvc encryption support.
CSI_ENABLE_ENCRYPTION: "false"
# Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is significant drop in read/write performance.
# CSI_ENABLE_HOST_NETWORK: "true"
# Set to true to enable adding volume metadata on the CephFS subvolume and RBD images.
# Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
# Hence enable metadata is false by default.
# CSI_ENABLE_METADATA: "true"
# cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases
# like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
# CSI_CLUSTER_NAME: "my-prod-cluster"
# Set logging level for cephCSI containers maintained by the cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
# CSI_LOG_LEVEL: "0"
# Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# CSI_SIDECAR_LOG_LEVEL: "0"
# Set replicas for csi provisioner deployment.
CSI_PROVISIONER_REPLICAS: "2"
# OMAP generator will generate the omap mapping between the PV name and the RBD image.
# CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
# By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable
# it set it to false.
# CSI_ENABLE_OMAP_GENERATOR: "false"
# set to false to disable deployment of snapshotter container in CephFS provisioner pod.
CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
# set to false to disable deployment of snapshotter container in NFS provisioner pod.
CSI_ENABLE_NFS_SNAPSHOTTER: "true"
# set to false to disable deployment of snapshotter container in RBD provisioner pod.
CSI_ENABLE_RBD_SNAPSHOTTER: "true"
# Enable cephfs kernel driver instead of ceph-fuse.
# If you disable the kernel client, your application may be disrupted during upgrade.
# See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
# NOTE! cephfs quota is not supported in kernel version < 4.17
CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
# (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
CSI_RBD_FSGROUPPOLICY: "File"
# (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
CSI_CEPHFS_FSGROUPPOLICY: "File"
# (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
CSI_NFS_FSGROUPPOLICY: "File"
# (Optional) Allow starting unsupported ceph-csi image
ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
# (Optional) control the host mount of /etc/selinux for csi plugin pods.
CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
# ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.7.2"
# ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1"
# ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.6.0"
# ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v3.3.0"
# ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0"
# ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.0.0"
# To indicate the image pull policy to be applied to all the containers in the csi driver pods.
# ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent"
# (Optional) set user created priorityclassName for csi plugin pods.
CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
# (Optional) set user created priorityclassName for csi provisioner pods.
CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
# CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
# CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
# CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
# CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
# CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
# Default value is RollingUpdate.
# CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
# kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
# ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
# Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
# ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
# Labels to add to the CSI RBD Deployments and DaemonSets Pods.
# ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
# Labels to add to the CSI NFS Deployments and DaemonSets Pods.
# ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"
# (Optional) CephCSI CephFS plugin Volumes
# CSI_CEPHFS_PLUGIN_VOLUME: |
# - name: lib-modules
# hostPath:
# path: /run/current-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# (Optional) CephCSI CephFS plugin Volume mounts
# CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: |
# - name: host-nix
# mountPath: /nix
# readOnly: true
# (Optional) CephCSI RBD plugin Volumes
# CSI_RBD_PLUGIN_VOLUME: |
# - name: lib-modules
# hostPath:
# path: /run/current-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# (Optional) CephCSI RBD plugin Volume mounts
# CSI_RBD_PLUGIN_VOLUME_MOUNT: |
# - name: host-nix
# mountPath: /nix
# readOnly: true
# (Optional) CephCSI provisioner NodeAffinity (applied to both CephFS and RBD provisioner).
# CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner).
# Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_PROVISIONER_TOLERATIONS: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/control-plane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) CephCSI plugin NodeAffinity (applied to both CephFS and RBD plugin).
# CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
# (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin).
# Put here list of taints you want to tolerate in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# CSI_PLUGIN_TOLERATIONS: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/control-plane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
# (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_RBD_PROVISIONER_TOLERATIONS: |
# - key: node.rook.io/rbd
# operator: Exists
# (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
# (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# CSI_RBD_PLUGIN_TOLERATIONS: |
# - key: node.rook.io/rbd
# operator: Exists
# (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
# CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
# (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI provisioner would be best to start on the same nodes as other ceph daemons.
# CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
# - key: node.rook.io/cephfs
# operator: Exists
# (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
# CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
# NOTE: Support for defining NodeAffinity for operators other than "In" and "Exists" requires the user to input a
# valid v1.NodeAffinity JSON or YAML string. For example, the following is valid YAML v1.NodeAffinity:
# CSI_CEPHFS_PLUGIN_NODE_AFFINITY: |
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: myKey
# operator: DoesNotExist
# (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS).
# Put here list of taints you want to tolerate in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# CSI_CEPHFS_PLUGIN_TOLERATIONS: |
# - key: node.rook.io/cephfs
# operator: Exists
# (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
# CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
# (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
  # List here the taints you want to tolerate, in YAML format.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
# CSI_NFS_PROVISIONER_TOLERATIONS: |
# - key: node.rook.io/nfs
# operator: Exists
# (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
# CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
# (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
  # List here the taints you want to tolerate, in YAML format.
# CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# CSI_NFS_PLUGIN_TOLERATIONS: |
# - key: node.rook.io/nfs
# operator: Exists
  # (Optional) Ceph CSI RBD provisioner resource requirements. List here the resource
  # requests and limits you want to apply to the provisioner pod.
#CSI_RBD_PROVISIONER_RESOURCE: |
# - name : csi-provisioner
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-resizer
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-attacher
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-snapshotter
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-rbdplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : csi-omap-generator
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
  # (Optional) Ceph CSI RBD plugin resource requirements. List here the resource
  # requests and limits you want to apply to the plugin pod.
#CSI_RBD_PLUGIN_RESOURCE: |
# - name : driver-registrar
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# - name : csi-rbdplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
  # (Optional) Ceph CSI CephFS provisioner resource requirements. List here the resource
  # requests and limits you want to apply to the provisioner pod.
#CSI_CEPHFS_PROVISIONER_RESOURCE: |
# - name : csi-provisioner
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-resizer
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-attacher
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-snapshotter
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-cephfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
  # (Optional) Ceph CSI CephFS plugin resource requirements. List here the resource
  # requests and limits you want to apply to the plugin pod.
#CSI_CEPHFS_PLUGIN_RESOURCE: |
# - name : driver-registrar
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# - name : csi-cephfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
# - name : liveness-prometheus
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
  # (Optional) Ceph CSI NFS provisioner resource requirements. List here the resource
  # requests and limits you want to apply to the provisioner pod.
# CSI_NFS_PROVISIONER_RESOURCE: |
# - name : csi-provisioner
# resource:
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 256Mi
# cpu: 200m
# - name : csi-nfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
  # (Optional) Ceph CSI NFS plugin resource requirements. List here the resource
  # requests and limits you want to apply to the plugin pod.
# CSI_NFS_PLUGIN_RESOURCE: |
# - name : driver-registrar
# resource:
# requests:
# memory: 128Mi
# cpu: 50m
# limits:
# memory: 256Mi
# cpu: 100m
# - name : csi-nfsplugin
# resource:
# requests:
# memory: 512Mi
# cpu: 250m
# limits:
# memory: 1Gi
# cpu: 500m
  # Set to true to enable the Ceph CSI liveness container.
  # The commented CSI_CEPHFS_* settings below configure the CephFS gRPC and liveness metrics ports.
CSI_ENABLE_LIVENESS: "false"
# CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
# CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
# Configure CSI RBD grpc and liveness metrics port
CSI_RBD_GRPC_METRICS_PORT: "9092"
# CSI_RBD_LIVENESS_METRICS_PORT: "9080"
# CSIADDONS_PORT: "9070"
  # Whether the OBC provisioner should watch the operator namespace; if not, the namespace of the cluster will be used.
ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
# Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
# This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
ROOK_ENABLE_DISCOVERY_DAEMON: "false"
  # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it defaults to 15.
ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
# Enable the csi addons sidecar.
CSI_ENABLE_CSIADDONS: "false"
# ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
CSI_GRPC_TIMEOUT_SECONDS: "150"
# Enable topology based provisioning.
CSI_ENABLE_TOPOLOGY: "false"
  # Domain labels define which node labels the CSI node plugins
  # use as the topology domains they advertise.
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# CSI_TOPOLOGY_DOMAIN_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
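  # For example (a hypothetical zone-aware layout, using the standard Kubernetes zone label
  # from the list above), topology-based provisioning could be enabled with:
  #   CSI_ENABLE_TOPOLOGY: "true"
  #   CSI_TOPOLOGY_DOMAIN_LABELS: "topology.kubernetes.io/zone"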
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-ceph-operator
namespace: rook-ceph # namespace:operator
labels:
operator: rook
storage-backend: ceph
app.kubernetes.io/name: rook-ceph
app.kubernetes.io/instance: rook-ceph
app.kubernetes.io/component: rook-ceph-operator
app.kubernetes.io/part-of: rook-ceph-operator
spec:
selector:
matchLabels:
app: rook-ceph-operator
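  # The Recreate strategy stops the old operator pod before starting a new one,
  # so only a single operator instance runs at any time.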
strategy:
type: Recreate
replicas: 1
template:
metadata:
labels:
app: rook-ceph-operator
spec:
serviceAccountName: rook-ceph-system
containers:
- name: rook-ceph-operator
image: rook/ceph:v1.10.4
args: ["ceph", "operator"]
securityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
volumeMounts:
- mountPath: /var/lib/rook
name: rook-config
- mountPath: /etc/ceph
name: default-config-dir
- mountPath: /etc/webhook
name: webhook-cert
ports:
- containerPort: 9443
name: https-webhook
protocol: TCP
env:
# If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
# If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
- name: ROOK_CURRENT_NAMESPACE_ONLY
value: "false"
# Rook Discover toleration. Will tolerate all taints with all keys.
# Choose between NoSchedule, PreferNoSchedule and NoExecute:
# - name: DISCOVER_TOLERATION
# value: "NoSchedule"
# (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
# - name: DISCOVER_TOLERATION_KEY
# value: "<KeyOfTheTaintToTolerate>"
            # (Optional) Rook Discover tolerations list. List here the taints you want to tolerate, in YAML format.
# - name: DISCOVER_TOLERATIONS
# value: |
# - effect: NoSchedule
# key: node-role.kubernetes.io/control-plane
# operator: Exists
# - effect: NoExecute
# key: node-role.kubernetes.io/etcd
# operator: Exists
# (Optional) Rook Discover priority class name to set on the pod(s)
# - name: DISCOVER_PRIORITY_CLASS_NAME
# value: "<PriorityClassName>"
# (Optional) Discover Agent NodeAffinity.
# - name: DISCOVER_AGENT_NODE_AFFINITY
# value: "role=storage-node; storage=rook, ceph"
# (Optional) Discover Agent Pod Labels.
# - name: DISCOVER_AGENT_POD_LABELS
# value: "key1=value1,key2=value2"
# The duration between discovering devices in the rook-discover daemonset.
- name: ROOK_DISCOVER_DEVICES_INTERVAL
value: "60m"
            # Whether to start pods that mount a host path as privileged; this includes the Ceph mon and osd pods.
            # Set this to true if SELinux is enabled (e.g. OpenShift) to work around the anyuid issues.
# For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
value: "false"
# Disable automatic orchestration when new devices are discovered
- name: ROOK_DISABLE_DEVICE_HOTPLUG
value: "false"
            # Provide customised regexes as comma-separated values; for example, the regex for RBD-based volumes is "(?i)rbd[0-9]+".
            # To use more than one regex, separate them with commas.
            # The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
            # To blacklist an additional disk pattern, append its regex after a comma (see the example below).
            # If the value is empty, the default regex will be used.
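            # For example (a hypothetical extra pattern), appending ",(?i)loop[0-9]+" to the default
            # value below would additionally ignore loop devices:
            #   value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+,(?i)loop[0-9]+"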
- name: DISCOVER_DAEMON_UDEV_BLACKLIST
value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
# Time to wait until the node controller will move Rook pods to other
# nodes after detecting an unreachable node.
# Pods affected by this setting are:
# mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
# The value used in this variable replaces the default value of 300 secs
# added automatically by k8s as Toleration for
# <node.kubernetes.io/unreachable>
            # The total time before Rook pods are rescheduled onto healthy nodes after a node
            # becomes unreachable is the sum of:
# --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
# --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
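            # With the defaults above, that works out to roughly 40 + 5 = 45 seconds before the
            # affected pods are rescheduled (assuming the default node-monitor-grace-period).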
- name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
value: "5"
- name: ROOK_DISABLE_ADMISSION_CONTROLLER
value: "false"
# The name of the node to pass with the downward API
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# The pod name to pass with the downward API
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# The pod namespace to pass with the downward API
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# Recommended resource requests and limits, if desired
#resources:
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Uncomment it to run lib bucket provisioner in multithreaded mode
#- name: LIB_BUCKET_PROVISIONER_THREADS
# value: "5"
# Uncomment it to run rook operator on the host network
#hostNetwork: true
volumes:
- name: rook-config
emptyDir: {}
- name: default-config-dir
emptyDir: {}
- name: webhook-cert
emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT
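# A minimal rollout sketch (assuming the standard upstream file names crds.yaml and common.yaml
# are applied alongside this operator manifest; adjust to however this repo's playbooks apply them):
#   kubectl apply -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl -n rook-ceph rollout status deploy/rook-ceph-operator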