wget https://v.vimll.com:9999/download/hs/local-storage.yaml
cat local-storage.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
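This StorageClass has no dynamic provisioner; volumeBindingMode: WaitForFirstConsumer delays PersistentVolume binding until a consuming pod is scheduled, so the scheduler can place the pod on the node that actually holds the local disk. Apply it with:
kubectl apply -f local-storage.yaml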
wget https://v.vimll.com:9999/download/hs/local-volume-provisioner.generated.yaml
cat local-volume-provisioner.generated.yaml
---
# Source: provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-disks-provisioner
  namespace: local-disks
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
---
# Source: provisioner/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-disks-provisioner-config
  namespace: local-disks
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
data:
  storageClassMap: |
    local-disks:
      hostDir: /var/data
      mountDir: /var/data
      blockCleanerCommand:
        - "/scripts/shred.sh"
        - "2"
      volumeMode: Filesystem
      fsType: ext4
      namePattern: "*"
---
# Source: provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-disks-provisioner-node-clusterrole
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
# Source: provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-disks-provisioner-pv-binding
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
subjects:
- kind: ServiceAccount
  name: local-disks-provisioner
  namespace: local-disks
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
# Source: provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-disks-provisioner-node-binding
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
subjects:
- kind: ServiceAccount
  name: local-disks-provisioner
  namespace: local-disks
roleRef:
  kind: ClusterRole
  name: local-disks-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io
---
# Source: provisioner/templates/daemonset_linux.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-disks-provisioner
  namespace: local-disks
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: provisioner
      app.kubernetes.io/instance: local-disks
  template:
    metadata:
      labels:
        app.kubernetes.io/name: provisioner
        app.kubernetes.io/instance: local-disks
      annotations:
        checksum/config: 929b8aa3a630a896cc3cf04acc30fa747f229e602691dfd0f33abbb902d2202a
    spec:
      serviceAccountName: local-disks-provisioner
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: provisioner
        image: quay.io/external_storage/local-volume-provisioner:v2.4.0
        securityContext:
          privileged: true
        env:
        - name: MY_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: MY_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: JOB_CONTAINER_IMAGE
          value: quay.io/external_storage/local-volume-provisioner:v2.4.0
        ports:
        - name: metrics
          containerPort: 8080
        volumeMounts:
        - name: provisioner-config
          mountPath: /etc/provisioner/config
          readOnly: true
        - name: provisioner-dev
          mountPath: /dev
        - name: local-disks
          mountPath: /var/data
          mountPropagation: HostToContainer
      volumes:
      - name: provisioner-config
        configMap:
          name: local-disks-provisioner-config
      - name: provisioner-dev
        hostPath:
          path: /dev
      - name: local-disks
        hostPath:
          path: /var/data
---
# Source: provisioner/templates/daemonset_windows.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-disks-provisioner-win
  namespace: local-disks
  labels:
    helm.sh/chart: provisioner-2.5.1
    app.kubernetes.io/name: provisioner
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: local-disks
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: provisioner
      app.kubernetes.io/instance: local-disks
  template:
    metadata:
      labels:
        app.kubernetes.io/name: provisioner
        app.kubernetes.io/instance: local-disks
      annotations:
        checksum/config: 929b8aa3a630a896cc3cf04acc30fa747f229e602691dfd0f33abbb902d2202a
    spec:
      serviceAccountName: local-disks-provisioner
      nodeSelector:
        kubernetes.io/os: windows
      tolerations:
      # an empty key with operator Exists matches all keys, values and effects,
      # which means this will tolerate everything
      - operator: "Exists"
      containers:
      - name: provisioner
        image: quay.io/external_storage/local-volume-provisioner:v2.4.0
        env:
        - name: MY_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: MY_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: JOB_CONTAINER_IMAGE
          value: quay.io/external_storage/local-volume-provisioner:v2.4.0
        ports:
        - name: metrics
          containerPort: 8080
        volumeMounts:
        - name: provisioner-config
          mountPath: /etc/provisioner/config
          readOnly: true
        - name: provisioner-dev
          mountPath: /dev
        - name: local-disks
          mountPath: /var/data
          mountPropagation: HostToContainer
        - name: csi-proxy-volume-v1
          mountPath: \\.\pipe\csi-proxy-volume-v1
        - name: csi-proxy-filesystem-v1
          mountPath: \\.\pipe\csi-proxy-filesystem-v1
        - name: csi-proxy-disk-v1
          mountPath: \\.\pipe\csi-proxy-disk-v1
        # these paths are still included for compatibility; they're used
        # only if the node still has the beta version of the CSI proxy
        - name: csi-proxy-volume-v1beta2
          mountPath: \\.\pipe\csi-proxy-volume-v1beta2
        - name: csi-proxy-filesystem-v1beta1
          mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1
        - name: csi-proxy-disk-v1beta2
          mountPath: \\.\pipe\csi-proxy-disk-v1beta2
      volumes:
      - name: csi-proxy-disk-v1
        hostPath:
          path: \\.\pipe\csi-proxy-disk-v1
          type: ""
      - name: csi-proxy-volume-v1
        hostPath:
          path: \\.\pipe\csi-proxy-volume-v1
          type: ""
      - name: csi-proxy-filesystem-v1
        hostPath:
          path: \\.\pipe\csi-proxy-filesystem-v1
          type: ""
      # these paths are still included for compatibility; they're used
      # only if the node still has the beta version of the CSI proxy
      - name: csi-proxy-disk-v1beta2
        hostPath:
          path: \\.\pipe\csi-proxy-disk-v1beta2
          type: ""
      - name: csi-proxy-volume-v1beta2
        hostPath:
          path: \\.\pipe\csi-proxy-volume-v1beta2
          type: ""
      - name: csi-proxy-filesystem-v1beta1
        hostPath:
          path: \\.\pipe\csi-proxy-filesystem-v1beta1
          type: ""
      - name: provisioner-config
        configMap:
          name: local-disks-provisioner-config
      - name: provisioner-dev
        hostPath:
          path: "C:\\dev"
          # If nothing exists at the given path, an empty directory will be
          # created there as needed with permission set to 0755,
          # having the same group and ownership as the kubelet.
          type: DirectoryOrCreate
      - name: local-disks
        hostPath:
          path: /var/data
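All of the generated objects target the local-disks namespace, which the manifest itself does not create. Assuming it does not exist yet, create it first, then apply the manifest and confirm the DaemonSet pods come up:
kubectl create namespace local-disks
kubectl apply -f local-volume-provisioner.generated.yaml
kubectl -n local-disks get pods -o wide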
Set local-storage as the default StorageClass:
kubectl patch storageclass local-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
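Verify that the annotation took effect; local-storage should now be marked (default):
kubectl get storageclass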
Usage: bind-mount a data directory under /var/data (the configured hostDir) so the provisioner can discover it:
mkdir -p /data/pxc_app1 /var/data/pxc_app1
echo '/data/pxc_app1 /var/data/pxc_app1 none bind 0 0' >> /etc/fstab
mount -a
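The provisioner scans hostDir (/var/data) and publishes each discovered mount point as a PersistentVolume with storageClassName local-disks, so a PV backed by pxc_app1 should appear shortly:
kubectl get pv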
Example: persisting a PXC cluster with storageClass "local-disks":
persistence:
  enabled: true
  storageClass: "local-disks"
  accessMode: ReadWriteOnce
  size: 300Gi
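For reference, a minimal sketch of an equivalent standalone PersistentVolumeClaim against the same class (the claim name data-pxc-app1 is hypothetical; adjust it to your release):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-pxc-app1   # hypothetical name
spec:
  storageClassName: local-disks
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 300Gi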