Showing 6 changed files with 135 additions and 11 deletions.
3 changes: 3 additions & 0 deletions deploy/backupstores/minio-backupstore.yaml
@@ -2,6 +2,7 @@ apiVersion: v1
kind: Secret
metadata:
name: minio-secret
namespace: default
type: Opaque
data:
AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key
@@ -24,6 +25,7 @@ apiVersion: v1
kind: Pod
metadata:
name: longhorn-test-minio
namespace: default
labels:
app: longhorn-test-minio
spec:
@@ -55,6 +57,7 @@ apiVersion: v1
kind: Service
metadata:
name: minio-service
namespace: default
spec:
selector:
app: longhorn-test-minio
2 changes: 2 additions & 0 deletions deploy/backupstores/nfs-backupstore.yaml
@@ -2,6 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
name: longhorn-test-nfs
namespace: default
labels:
app: longhorn-test-nfs
spec:
@@ -37,6 +38,7 @@ kind: Service
apiVersion: v1
metadata:
name: longhorn-test-nfs-svc
namespace: default
spec:
selector:
app: longhorn-test-nfs
64 changes: 56 additions & 8 deletions deploy/longhorn.yaml
@@ -39,7 +39,7 @@ rules:
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["longhorn.rancher.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -163,6 +163,45 @@ spec:
scope: Namespaced
version: v1alpha1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
longhorn-manager: InstanceManager
name: instancemanagers.longhorn.rancher.io
spec:
group: longhorn.rancher.io
names:
kind: InstanceManager
listKind: InstanceManagerList
plural: instancemanagers
shortNames:
- lhim
singular: instancemanager
scope: Namespaced
version: v1alpha1
---
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: longhorn-system
data:
default-setting.yaml: |-
backup-target:
backup-target-credential-secret:
create-default-disk-labeled-nodes:
default-data-path:
replica-soft-anti-affinity:
storage-over-provisioning-percentage:
storage-minimal-available-percentage:
upgrade-checker:
default-replica-count:
guaranteed-engine-cpu:
default-longhorn-static-storage-class:
backupstore-poll-interval:
taint-toleration:
---
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
@@ -181,7 +220,7 @@ spec:
spec:
containers:
- name: longhorn-manager
image: rancher/longhorn-manager:v0.5.0
image: longhornio/longhorn-manager:v0.6.0-rc1
imagePullPolicy: Always
securityContext:
privileged: true
@@ -190,9 +229,9 @@ spec:
- -d
- daemon
- --engine-image
- rancher/longhorn-engine:v0.5.0
- longhornio/longhorn-engine:v0.6.0-rc1
- --manager-image
- rancher/longhorn-manager:v0.5.0
- longhornio/longhorn-manager:v0.6.0-rc1
- --service-account
- longhorn-service-account
ports:
@@ -207,6 +246,8 @@ spec:
- name: longhorn
mountPath: /var/lib/rancher/longhorn/
mountPropagation: Bidirectional
- name: longhorn-default-setting
mountPath: /var/lib/longhorn/setting/
env:
- name: POD_NAMESPACE
valueFrom:
@@ -220,6 +261,9 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Should be: mount path of the volume longhorn-default-setting + the key of the configmap data in 04-default-setting.yaml
- name: DEFAULT_SETTING_PATH
value: /var/lib/longhorn/setting/default-setting.yaml
volumes:
- name: dev
hostPath:
@@ -233,6 +277,9 @@ spec:
- name: longhorn
hostPath:
path: /var/lib/rancher/longhorn/
- name: longhorn-default-setting
configMap:
name: longhorn-default-setting
serviceAccountName: longhorn-service-account
---
kind: Service
@@ -269,12 +316,13 @@ spec:
spec:
containers:
- name: longhorn-ui
image: rancher/longhorn-ui:v0.5.0
image: longhornio/longhorn-ui:v0.6.0-rc1
ports:
- containerPort: 8000
env:
- name: LONGHORN_MANAGER_IP
value: "http://longhorn-backend:9500"
serviceAccountName: longhorn-service-account
---
kind: Service
apiVersion: v1
@@ -308,18 +356,18 @@ spec:
spec:
initContainers:
- name: wait-longhorn-manager
image: rancher/longhorn-manager:v0.5.0
image: longhornio/longhorn-manager:v0.6.0-rc1
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-driver-deployer
image: rancher/longhorn-manager:v0.5.0
image: longhornio/longhorn-manager:v0.6.0-rc1
imagePullPolicy: Always
command:
- longhorn-manager
- -d
- deploy-driver
- --manager-image
- rancher/longhorn-manager:v0.5.0
- longhornio/longhorn-manager:v0.6.0-rc1
- --manager-url
- http://longhorn-backend:9500/v1
# manually choose "flexvolume" or "csi"
2 changes: 1 addition & 1 deletion examples/deployment.yaml
@@ -23,7 +23,7 @@ spec:
requests:
storage: 2Gi
---
apiVersion: apps/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: mysql
69 changes: 69 additions & 0 deletions scripts/lhexec
@@ -0,0 +1,69 @@
#!/usr/bin/env bash

NS="longhorn-system"

print_usage() {
    echo "Usage: ${0} [|-h|--help] volume_name longhorn_commands_arguments"
    echo ""
    echo "Examples:"
    echo "  ${0} test-vol snapshot ls"
    echo "  ${0} test-vol info"
    echo ""
    echo "Note: Must have Longhorn installed in the \"longhorn-system\" namespace and have access to \"kubectl\" and the namespace"
    echo ""
    exit 0
}

check_volume_exist(){
    VOLUME_NAME=${1}
    kubectl -n ${NS} get lhv ${VOLUME_NAME} > /dev/null 2>&1
    if [[ ${?} -ne 0 ]]; then
        echo "Err: Volume ${VOLUME_NAME} not found"
        exit 1
    fi
}

check_engine_state(){
    VOLUME_NAME=${1}
    LHE_STATE_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.currentState}"
    LHE_STATE=`kubectl -n ${NS} get lhe --output=jsonpath="${LHE_STATE_FILTER}"`

    if [[ ${LHE_STATE} != "running" ]]; then
        echo "Err: Longhorn engine for volume ${VOLUME_NAME} is not running"
        exit 1
    fi

}

exec_command() {
    VOLUME_NAME=${1}
    COMMAND_ARGS="${@:2}"

    INSTANCE_MANAGER_NAME_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.instanceManagerName}"
    INSTANCE_MANAGER_NAME=`kubectl -n ${NS} get lhe --output=jsonpath="${INSTANCE_MANAGER_NAME_FILTER}"`

    ENGINE_PORT_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.port}"
    ENGINE_PORT=`kubectl -n ${NS} get lhe --output=jsonpath="${ENGINE_PORT_FILTER}"`

    kubectl -n ${NS} exec -it ${INSTANCE_MANAGER_NAME} -- bash -c "longhorn --url localhost:${ENGINE_PORT} ${COMMAND_ARGS}"

}


ARG=$1
case $ARG in
    "" | "-h" | "--help")
        print_usage
        ;;
    *)
        VOLUME_NAME=${ARG}
        shift
        COMMAND_ARGS="${@}"
        if [[ ${COMMAND_ARGS} == "" ]]; then
            COMMAND_ARGS="help"
        fi
        check_volume_exist ${VOLUME_NAME}
        check_engine_state ${VOLUME_NAME}
        exec_command ${VOLUME_NAME} ${COMMAND_ARGS}
        ;;
esac
6 changes: 4 additions & 2 deletions uninstall/uninstall.yaml
@@ -2,6 +2,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: longhorn-uninstall-service-account
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -24,7 +25,7 @@ rules:
resources: ["jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["longhorn.rancher.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -44,6 +45,7 @@ apiVersion: batch/v1
kind: Job
metadata:
name: longhorn-uninstall
namespace: default
spec:
activeDeadlineSeconds: 900
backoffLimit: 1
@@ -53,7 +55,7 @@ spec:
spec:
containers:
- name: longhorn-uninstall
image: rancher/longhorn-manager:v0.5.0
image: longhornio/longhorn-manager:v0.6.0-rc1
imagePullPolicy: Always
command:
- longhorn-manager
