diff --git a/README.md b/README.md
index 827cdb9..5c6dab4 100644
--- a/README.md
+++ b/README.md
@@ -24,3 +24,14 @@
 
 kubectl apply -f tiller.yaml
 helm init --service-account tiller
+
+## rook.io
+
+    helm repo add rook-beta https://charts.rook.io/beta
+    helm install --namespace rook-ceph-system rook-beta/rook-ceph
+    kubectl apply -f rook-cluster.yaml
+    kubectl apply -f rook-block.yaml
+    kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+    kubectl apply -f rook-filesystem.yaml
+    kubectl apply -f rook-object.yaml
+
diff --git a/rook-block.yaml b/rook-block.yaml
new file mode 100644
index 0000000..a053ecf
--- /dev/null
+++ b/rook-block.yaml
@@ -0,0 +1,24 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: replicapool
+  namespace: rook-ceph
+spec:
+  failureDomain: host
+  replicated:
+    size: 3
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-ceph-block
+provisioner: ceph.rook.io/block
+parameters:
+  blockPool: replicapool
+  # The value of "clusterNamespace" MUST be the same as the one in which your rook cluster exists
+  clusterNamespace: rook-ceph
+  # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+  fstype: xfs
+# Optional, default reclaimPolicy is "Delete". Other options are "Retain" and "Recycle", as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/
+reclaimPolicy: Retain
+
diff --git a/rook-cluster.yaml b/rook-cluster.yaml
new file mode 100644
index 0000000..a17a471
--- /dev/null
+++ b/rook-cluster.yaml
@@ -0,0 +1,186 @@
+#################################################################################
+# This example first defines the necessary namespaces and RBAC security objects.
+# The actual Ceph Cluster CRD example can be found at the bottom of this file.
+#################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph-system
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph-system
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+#################################################################################
+# The Ceph Cluster CRD example
+#################################################################################
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    # For the latest ceph images, see https://hub.docker.com/r/ceph/ceph/tags
+    image: ceph/ceph:v13.2.4-20190109
+  dataDirHostPath: /rook
+  dashboard:
+    enabled: true
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  storage:
+    useAllNodes: true
+    useAllDevices: false
+    config:
+      databaseSizeMB: "1024"
+      journalSizeMB: "1024"
+
diff --git a/rook-filesystem.yaml b/rook-filesystem.yaml
new file mode 100644
index 0000000..08838f2
--- /dev/null
+++ b/rook-filesystem.yaml
@@ -0,0 +1,15 @@
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: myfs
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    replicated:
+      size: 3
+  dataPools:
+  - replicated:
+      size: 3
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
diff --git a/rook-object.yaml b/rook-object.yaml
new file mode 100644
index 0000000..b5e5e57
--- /dev/null
+++ b/rook-object.yaml
@@ -0,0 +1,23 @@
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+  dataPool:
+    failureDomain: host
+    erasureCoded:
+      dataChunks: 2
+      codingChunks: 1
+  gateway:
+    type: s3
+    sslCertificateRef:
+    port: 80
+    securePort:
+    instances: 1
+    allNodes: false
+
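As a quick smoke test after the patch is applied, a PersistentVolumeClaim against the new rook-ceph-block StorageClass should be provisioned dynamically once the Ceph mons and OSDs are healthy. This is a minimal sketch and not part of the patch above; the manifest filename and claim name (rook-pvc-test) are illustrative.

    # rook-pvc-test.yaml -- verifies dynamic provisioning via the rook-ceph-block StorageClass
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: rook-pvc-test
    spec:
      accessModes:
      - ReadWriteOnce
      # Redundant once rook-ceph-block is patched to be the default class, but explicit is clearer
      storageClassName: rook-ceph-block
      resources:
        requests:
          storage: 1Gi

Applying the claim and checking its status confirms the provisioner is working; the claim should report STATUS "Bound" once a Ceph RBD image has been created for it.

    kubectl apply -f rook-pvc-test.yaml
    kubectl get pvc rook-pvc-test
    kubectl delete -f rook-pvc-test.yaml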