diff --git a/AutoProvision/depoloyment.yaml b/AutoProvision/depoloyment.yaml
new file mode 100644
index 0000000..dbfbc52
--- /dev/null
+++ b/AutoProvision/depoloyment.yaml
@@ -0,0 +1,51 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: "nfs-provision"
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: nfs-client-provisioner
+  namespace: nfs-provision
+  labels:
+    app: nfs-client-provisioner
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nfs-client-provisioner
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: nfs-client-provisioner
+    spec:
+      serviceAccountName: nfs-client-provisioner
+      containers:
+        - name: nfs-client-provisioner
+          image: rkevin/nfs-subdir-external-provisioner:fix-k8s-1.20
+          resources:
+            limits:
+              cpu: "50m"
+              memory: "50Mi"
+            requests:
+              cpu: "25m"
+              memory: "25Mi"
+          volumeMounts:
+            - name: nfs-client-root
+              mountPath: /persistentvolumes
+          env:
+            - name: PROVISIONER_NAME
+              value: nfs-auto-provisioner
+            - name: NFS_SERVER
+              value: 192.168.68.103
+            - name: NFS_PATH
+              value: /k8s-data
+      volumes:
+        - name: nfs-client-root
+          nfs:
+            server: 192.168.68.103
+            path: /k8s-data
diff --git a/AutoProvision/serviceaccount.yaml b/AutoProvision/serviceaccount.yaml
new file mode 100644
index 0000000..a1f3895
--- /dev/null
+++ b/AutoProvision/serviceaccount.yaml
@@ -0,0 +1,63 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  namespace: nfs-provision
+  name: nfs-client-provisioner
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: nfs-client-provisioner-runner
+  namespace: nfs-provision
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: run-nfs-client-provisioner
+  namespace: nfs-provision
+subjects:
+  - kind: ServiceAccount
+    name: nfs-client-provisioner
+    namespace: nfs-provision
+roleRef:
+  kind: ClusterRole
+  name: nfs-client-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: leader-locking-nfs-client-provisioner
+  namespace: nfs-provision
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: leader-locking-nfs-client-provisioner
+  namespace: nfs-provision
+subjects:
+  - kind: ServiceAccount
+    name: nfs-client-provisioner
+    # replace with namespace where provisioner is deployed
+    namespace: nfs-provision
+roleRef:
+  kind: Role
+  name: leader-locking-nfs-client-provisioner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/AutoProvision/storage_class.yaml b/AutoProvision/storage_class.yaml
new file mode 100644
index 0000000..6c208b4
--- /dev/null
+++ b/AutoProvision/storage_class.yaml
@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: fast
+provisioner: nfs-auto-provisioner
+parameters:
+  archiveOnDelete: "false"
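With the provisioner, its RBAC, and the "fast" StorageClass in place, any PersistentVolumeClaim that asks for that class should get a volume carved out as a subdirectory of /k8s-data on 192.168.68.103. A minimal sketch of such a claim (the name "demo-claim" and the 1Gi size are made up for illustration):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim          # hypothetical name
spec:
  storageClassName: fast    # matches AutoProvision/storage_class.yaml
  accessModes:
    - ReadWriteMany         # NFS-backed volumes can be shared across nodes
  resources:
    requests:
      storage: 1Gi

The PVCs later in this change (Grafana, MongoDb) request the same class through the older volume.beta.kubernetes.io/storage-class annotation rather than spec.storageClassName; both spellings target this StorageClass.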
diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml
new file mode 100644
index 0000000..9c0ff6d
--- /dev/null
+++ b/Grafana/deployment.yaml
@@ -0,0 +1,58 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: grafana-deployment
+  labels:
+    app: grafana
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: grafana
+  template:
+    metadata:
+      labels:
+        app: grafana
+    spec:
+      containers:
+        - name: grafana
+          image: grafana/grafana
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: GF_SERVER_ROOT_URL
+              value: /kubernetes/grafana
+            - name: GF_SERVER_SERVE_FROM_SUB_PATH
+              value: "true"
+          ports:
+            - containerPort: 3000
+              name: http-grafana
+              protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /robots.txt
+              port: 3000
+              scheme: HTTP
+            initialDelaySeconds: 10
+            periodSeconds: 30
+            successThreshold: 1
+            timeoutSeconds: 2
+          livenessProbe:
+            failureThreshold: 3
+            initialDelaySeconds: 30
+            periodSeconds: 10
+            successThreshold: 1
+            tcpSocket:
+              port: 3000
+            timeoutSeconds: 1
+          resources:
+            requests:
+              cpu: 250m
+              memory: 100Mi
+          volumeMounts:
+            - mountPath: /var/lib/grafana
+              name: grafana-config
+      volumes:
+        - name: grafana-config
+          persistentVolumeClaim:
+            claimName: pvc-grafana
diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml
new file mode 100644
index 0000000..4b039b8
--- /dev/null
+++ b/Grafana/ingress.yaml
@@ -0,0 +1,22 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.org/mergeable-ingress-type: "minion"
+    #nginx.ingress.kubernetes.io/rewrite-target: /$2
+  name: grafana
+  labels:
+    app: grafana
+spec:
+  rules:
+    - host: home.jerxie.com
+      http:
+        paths:
+          - backend:
+              service:
+                name: grafana-service
+                port:
+                  number: 3000
+            path: /kubernetes/grafana(/|$)(.*)
+            pathType: ImplementationSpecific
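Two things are worth noting on this ingress. First, the annotations mix two controllers: nginx.org/mergeable-ingress-type is understood by the NGINX Inc. controller, while the commented-out rewrite-target annotation belongs to the community ingress-nginx controller; only whichever controller is actually installed will act on its own annotation. Second, because the Grafana container sets GF_SERVER_SERVE_FROM_SUB_PATH=true and roots itself at /kubernetes/grafana, no rewrite is needed and the regex path can proxy through unchanged; a regex path is only interpreted as a regex under pathType: ImplementationSpecific (used above), never under Exact. If prefix-stripping were wanted instead, re-enabling the annotation would look roughly like this (sketch, assuming ingress-nginx):

  nginx.ingress.kubernetes.io/rewrite-target: /$2

where $2 is the second capture group of /kubernetes/grafana(/|$)(.*). Grafana's documentation usually shows GF_SERVER_ROOT_URL as a full URL (e.g. https://home.jerxie.com/kubernetes/grafana/); a bare path may work with sub-path serving enabled, but is worth verifying.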
diff --git a/Grafana/kustomization.yaml b/Grafana/kustomization.yaml
new file mode 100644
index 0000000..98f55aa
--- /dev/null
+++ b/Grafana/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: monitoring
+resources:
+- ./pvc.yaml
+- ./deployment.yaml
+- ./service.yaml
+- ./ingress.yaml
+
diff --git a/Grafana/pvc.yaml b/Grafana/pvc.yaml
new file mode 100644
index 0000000..054fd2b
--- /dev/null
+++ b/Grafana/pvc.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-grafana
+  labels:
+    app: grafana
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
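The kustomization stamps the monitoring namespace onto every listed resource, so none of the Grafana manifests need to declare it themselves, and the whole directory can be applied in one shot with "kubectl apply -k Grafana/". Of the four resources it lists, deployment.yaml, ingress.yaml, and pvc.yaml appear above; service.yaml follows next. The PVC requests its class through the legacy volume.beta.kubernetes.io/storage-class annotation; on current clusters the same request is normally written with the spec field instead, roughly:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-grafana
spec:
  storageClassName: fast    # equivalent to the beta annotation above
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi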
diff --git a/Grafana/service.yaml b/Grafana/service.yaml
new file mode 100644
index 0000000..75daf32
--- /dev/null
+++ b/Grafana/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: grafana-service
+  labels:
+    app: grafana
+spec:
+  ports:
+    - port: 3000
+      protocol: TCP
+  selector:
+    app: grafana
+  type: ClusterIP
+
diff --git a/Misc/rolebinding_axieyangb.yaml b/Misc/rolebinding_axieyangb.yaml
new file mode 100644
index 0000000..981ed08
--- /dev/null
+++ b/Misc/rolebinding_axieyangb.yaml
@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+# This binding grants the built-in "admin" ClusterRole to axieyangb@gmail.com
+# across every namespace in the cluster.
+kind: ClusterRoleBinding
+metadata:
+  name: axieyangb-admin-role
+subjects:
+- kind: User
+  name: axieyangb@gmail.com # Name is case sensitive
+  apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: admin
+  apiGroup: rbac.authorization.k8s.io
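Two caveats on the binding, both standard RBAC behavior: the User name must exactly match the username presented by the cluster's authenticator (for OIDC setups this is typically the email claim), and "admin" is the namespaced-administration role, so even bound cluster-wide it does not cover most cluster-scoped resources (that would take "cluster-admin"). The effect can be spot-checked with impersonation, e.g. "kubectl auth can-i create deployments -n default --as axieyangb@gmail.com".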
diff --git a/MongoDb/deployment.yaml b/MongoDb/deployment.yaml
new file mode 100644
index 0000000..f7c330a
--- /dev/null
+++ b/MongoDb/deployment.yaml
@@ -0,0 +1,87 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mongodb
+  namespace: database
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mongodb
+  namespace: database
+spec:
+  ports:
+    - port: 27017
+  selector:
+    app: mongodb
+  type: NodePort
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mongodb-secret
+  namespace: database
+type: Opaque
+data:
+  MONGO_INITDB_ROOT_USERNAME: cm9vdA==
+  MONGO_INITDB_ROOT_PASSWORD: UGE3NTIzdzByZA==
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mongodb
+  namespace: database
+spec:
+  selector:
+    matchLabels:
+      app: mongodb
+  template:
+    metadata:
+      labels:
+        app: mongodb
+    spec:
+      containers:
+        - image: mongo
+          name: mongodb
+          livenessProbe:
+            tcpSocket:
+              port: 27017
+            initialDelaySeconds: 15
+            periodSeconds: 20
+          resources:
+            limits:
+              memory: "800Mi"
+              cpu: "500m"
+            requests:
+              memory: "400Mi"
+              cpu: "250m"
+          env:
+            - name: MONGO_INITDB_ROOT_USERNAME
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-secret
+                  key: MONGO_INITDB_ROOT_USERNAME
+            - name: MONGO_INITDB_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: mongodb-secret
+                  key: MONGO_INITDB_ROOT_PASSWORD
+          ports:
+            - containerPort: 27017
+              name: mongo-port
+          volumeMounts:
+            - name: mongodb-storage
+              mountPath: /data/db
+      volumes:
+        - name: mongodb-storage
+          persistentVolumeClaim:
+            claimName: mongodb
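Two cautions on the MongoDB stack: the Secret's data values are only base64-encoded, not encrypted (cm9vdA== is literally "root"), so committing real credentials to git is unsafe; and type: NodePort exposes the database on a high port of every node, which is usually unnecessary when in-cluster clients can already reach it at mongodb.database.svc.cluster.local:27017 (ClusterIP would suffice). A minimal sketch of an in-cluster client wired up through the same Secret (pod name hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: mongo-client        # hypothetical
  namespace: database       # must live where mongodb-secret lives
spec:
  containers:
    - name: shell
      image: mongo
      command: ["sleep", "infinity"]
      env:
        - name: MONGO_USER
          valueFrom:
            secretKeyRef:
              name: mongodb-secret
              key: MONGO_INITDB_ROOT_USERNAME
        - name: MONGO_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mongodb-secret
              key: MONGO_INITDB_ROOT_PASSWORD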
diff --git a/OpenLdapServer/ldap-deployment.yaml b/OpenLdapServer/ldap-deployment.yaml
new file mode 100644
index 0000000..e02a917
--- /dev/null
+++ b/OpenLdapServer/ldap-deployment.yaml
@@ -0,0 +1,113 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ldap
+  namespace: ldap-server
+  labels:
+    app: ldap
+spec:
+  selector:
+    matchLabels:
+      app: ldap
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ldap
+    spec:
+      containers:
+        - name: ldap
+          image: osixia/openldap:1.4.0
+          resources:
+            requests:
+              cpu: "100m"
+              memory: "100Mi"
+            limits:
+              cpu: "200m"
+              memory: "200Mi"
+          livenessProbe:
+            tcpSocket:
+              port: 389
+            initialDelaySeconds: 15
+            periodSeconds: 20
+          volumeMounts:
+            - name: ldap-data
+              mountPath: /var/lib/ldap
+              readOnly: false
+            - name: ldap-config
+              mountPath: /etc/ldap/slapd.d
+              readOnly: false
+            - name: ldap-certs
+              mountPath: /container/service/slapd/assets/certs
+              readOnly: false
+          ports:
+            - containerPort: 389
+              name: openldap
+          env:
+            - name: LDAP_LOG_LEVEL
+              value: "256"
+            - name: LDAP_ORGANISATION
+              value: "Example Inc."
+            - name: LDAP_DOMAIN
+              value: "example.org"
+            - name: LDAP_ADMIN_PASSWORD
+              value: "admin"
+            - name: LDAP_CONFIG_PASSWORD
+              value: "config"
+            - name: LDAP_READONLY_USER
+              value: "false"
+            - name: LDAP_READONLY_USER_USERNAME
+              value: "readonly"
+            - name: LDAP_READONLY_USER_PASSWORD
+              value: "readonly"
+            - name: LDAP_RFC2307BIS_SCHEMA
+              value: "false"
+            - name: LDAP_BACKEND
+              value: "mdb"
+            - name: LDAP_TLS
+              value: "true"
+            - name: LDAP_TLS_CRT_FILENAME
+              value: "ldap.crt"
+            - name: LDAP_TLS_KEY_FILENAME
+              value: "ldap.key"
+            - name: LDAP_TLS_DH_PARAM_FILENAME
+              value: "dhparam.pem"
+            - name: LDAP_TLS_CA_CRT_FILENAME
+              value: "ca.crt"
+            - name: LDAP_TLS_ENFORCE
+              value: "false"
+            - name: LDAP_TLS_CIPHER_SUITE
+              value: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
+            - name: LDAP_TLS_VERIFY_CLIENT
+              value: "demand"
+            - name: LDAP_REPLICATION
+              value: "false"
+            - name: LDAP_REPLICATION_CONFIG_SYNCPROV
+              value: "binddn=\"cn=admin,cn=config\" bindmethod=simple credentials=$LDAP_CONFIG_PASSWORD searchbase=\"cn=config\" type=refreshAndPersist retry=\"60 +\" timeout=1 starttls=critical"
+            - name: LDAP_REPLICATION_DB_SYNCPROV
+              value: "binddn=\"cn=admin,$LDAP_BASE_DN\" bindmethod=simple credentials=$LDAP_ADMIN_PASSWORD searchbase=\"$LDAP_BASE_DN\" type=refreshAndPersist interval=00:00:00:10 retry=\"60 +\" timeout=1 starttls=critical"
+            - name: LDAP_REPLICATION_HOSTS
+              value: "#PYTHON2BASH:['ldap://ldap-one-service', 'ldap://ldap-two-service']"
+            - name: KEEP_EXISTING_CONFIG
+              value: "false"
+            - name: LDAP_REMOVE_CONFIG_AFTER_SETUP
+              value: "true"
+            - name: LDAP_SSL_HELPER_PREFIX
+              value: "ldap"
+      volumes:
+        - name: ldap-data
+          persistentVolumeClaim:
+            claimName: ldapserver-db
+          # hostPath:
+          #   path: "/data/ldap/db"
+        - name: ldap-config
+          persistentVolumeClaim:
+            claimName: ldapserver-config
+          # hostPath:
+          #   path: "/data/ldap/config"
+        - name: ldap-certs
+          persistentVolumeClaim:
+            claimName: ldapserver-certs
+
+          # hostPath:
+          #   path: "/data/ldap/certs"
diff --git a/OpenLdapServer/ldap-pvc.yaml b/OpenLdapServer/ldap-pvc.yaml
new file mode 100644
index 0000000..a42849c
--- /dev/null
+++ b/OpenLdapServer/ldap-pvc.yaml
@@ -0,0 +1,42 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-db
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 500Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-certs
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-config
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
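Note on storage classes: these PVCs, like the Grafana and MongoDb claims, request the "fast" class through the legacy volume.beta.kubernetes.io/storage-class annotation. On any recent cluster the supported spelling is the spec field; a minimal sketch for one of the claims:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: ldapserver-db
      namespace: ldap-server
    spec:
      storageClassName: fast
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 500Mi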
diff --git a/OpenLdapServer/ldap-service.yaml b/OpenLdapServer/ldap-service.yaml
new file mode 100644
index 0000000..7047e67
--- /dev/null
+++ b/OpenLdapServer/ldap-service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: ldap
+  name: ldap-service
+  namespace: ldap-server
+spec:
+  ports:
+    - port: 389
+  selector:
+    app: ldap
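Note: a quick in-cluster smoke test, assuming the defaults above (LDAP_DOMAIN example.org maps to base DN dc=example,dc=org, admin password "admin") and that ldap-utils ships in the osixia/openldap image:

    kubectl -n ldap-server exec deployment/ldap -- \
      ldapsearch -x -H ldap://ldap-service.ldap-server.svc:389 \
      -D "cn=admin,dc=example,dc=org" -w admin -b "dc=example,dc=org"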
diff --git a/OpenLdapServer/namespace.yaml b/OpenLdapServer/namespace.yaml
new file mode 100644
index 0000000..39be211
--- /dev/null
+++ b/OpenLdapServer/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ldap-server
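Note: the Namespace must exist before the namespaced LDAP objects can be created, so one workable apply order is:

    kubectl apply -f OpenLdapServer/namespace.yaml
    kubectl apply -f OpenLdapServer/ldap-pvc.yaml \
                  -f OpenLdapServer/ldap-deployment.yaml \
                  -f OpenLdapServer/ldap-service.yaml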
diff --git a/Prometheus/.ingress.yaml.swp b/Prometheus/.ingress.yaml.swp
new file mode 100644
index 0000000..6a2f7f8
--- /dev/null
+++ b/Prometheus/.ingress.yaml.swp
Binary files differ
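Note: Prometheus/.ingress.yaml.swp is a Vim swap file committed by accident; it carries no manifest content. A typical cleanup, assuming the file should not be tracked:

    git rm --cached Prometheus/.ingress.yaml.swp
    echo '*.swp' >> .gitignore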
diff --git a/Prometheus/configmap.yaml b/Prometheus/configmap.yaml
new file mode 100644
index 0000000..e3f8919
--- /dev/null
+++ b/Prometheus/configmap.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: prometheus-configmap
+data:
+  prometheus.yml: |
+    # my global config
+    global:
+      scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+      evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+      # scrape_timeout is set to the global default (10s).
+
+    # Alertmanager configuration
+    alerting:
+      alertmanagers:
+        - static_configs:
+            - targets:
+              # - alertmanager:9093
+
+    # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+    rule_files:
+      # - "first_rules.yml"
+      # - "second_rules.yml"
+
+    # A scrape configuration containing exactly one endpoint to scrape:
+    # Here it's Prometheus itself.
+    scrape_configs:
+      # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+      - job_name: 'envoy_metrics'
+        # metrics_path defaults to '/metrics'
+        metrics_path: /stats/prometheus
+        # scheme defaults to 'http'.
+
+        static_configs:
+          - targets: ['192.168.68.90:11111']
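The config above only scrapes the Envoy endpoint; Prometheus itself is served under the route prefix set in Prometheus/deployment.yaml below, so a self-scrape job merged into the scrape_configs list would need the prefixed metrics path. A sketch (the job name is arbitrary):

      - job_name: 'prometheus'
        # --web.route-prefix moves /metrics under /kubernetes/prometheus/
        metrics_path: /kubernetes/prometheus/metrics
        static_configs:
          - targets: ['localhost:9090']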
"list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: nfs-provision +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: nfs-provision +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: nfs-provision +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/AutoProvision/storage_class.yaml b/AutoProvision/storage_class.yaml new file mode 100644 index 0000000..6c208b4 --- /dev/null +++ b/AutoProvision/storage_class.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: fast +provisioner: nfs-auto-provisioner +parameters: + archiveOnDelete: "false" diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml new file mode 100644 index 0000000..9c0ff6d --- /dev/null +++ b/Grafana/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-deployment + labels: + app: grafana +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana + imagePullPolicy: IfNotPresent + env: + - name: GF_SERVER_ROOT_URL + value: /kubernetes/grafana + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-config + volumes: + - name: grafana-config + persistentVolumeClaim: + claimName: pvc-grafana diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml new file mode 100644 index 0000000..4b039b8 --- /dev/null +++ b/Grafana/ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/mergeale-ingress-type: "minion" + #nignx.ingress.kubernetes.io/rewrite-target: /$2 + name: grafana + labels: + app: grafana +spec: + rules: + - host: home.jerxie.com + http: + paths: + - backend: + service: + name: grafana-service + port: + number: 3000 + path: /kubernetes/grafana(/|$)(.*) + pathType: Exact diff --git a/Grafana/kustomization.yaml b/Grafana/kustomization.yaml new file mode 100644 index 0000000..98f55aa --- /dev/null +++ b/Grafana/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: monitoring +resources: +- 
./pvc.yaml +- ./deployment.yaml +- ./service.yaml +- ./ingress.yaml + diff --git a/Grafana/pvc.yaml b/Grafana/pvc.yaml new file mode 100644 index 0000000..054fd2b --- /dev/null +++ b/Grafana/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-grafana + labels: + app: grafana + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/Grafana/service.yaml b/Grafana/service.yaml new file mode 100644 index 0000000..75daf32 --- /dev/null +++ b/Grafana/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: grafana-service + labels: + app: grafana +spec: + ports: + - port: 3000 + protocol: TCP + selector: + app: grafana + type: ClusterIP + diff --git a/Misc/rolebinding_axieyangb.yaml b/Misc/rolebinding_axieyangb.yaml new file mode 100644 index 0000000..981ed08 --- /dev/null +++ b/Misc/rolebinding_axieyangb.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +# This role binding allows "dave" to read secrets in the "development" namespace. +# You need to already have a ClusterRole named "secret-reader". +kind: ClusterRoleBinding +metadata: + name: axieyangb-admin-role +subjects: +- kind: User + name: axieyangb@gmail.com # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: admin + apiGroup: rbac.authorization.k8s.io diff --git a/MongoDb/deployment.yaml b/MongoDb/deployment.yaml new file mode 100644 index 0000000..f7c330a --- /dev/null +++ b/MongoDb/deployment.yaml @@ -0,0 +1,87 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mongodb + namespace: database + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 50Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: mongodb + namespace: database +spec: + ports: + - port: 27017 + selector: + app: mongodb + type: NodePort +--- +apiVersion: v1 +kind: Secret +metadata: + name: mongodb-secret + namespace: database +type: Opaque +data: + MONGO_INITDB_ROOT_USERNAME: cm9vdA== + MONGO_INITDB_ROOT_PASSWORD: UGE3NTIzdzByZA== +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongodb + namespace: database +spec: + selector: + matchLabels: + app: mongodb + template: + metadata: + labels: + app: mongodb + spec: + containers: + - image: mongo + name: mongodb + livenessProbe: + tcpSocket: + port: 27017 + initialDelaySeconds: 15 + periodSeconds: 20 + resources: + limits: + memory: "800Mi" + cpu: "500m" + requests: + memory: "400Mi" + cpu: "250m" + env: + - name: MONGO_INITDB_ROOT_USERNAME + valueFrom: + secretKeyRef: + name: mongodb-secret + key: MONGO_INITDB_ROOT_USERNAME + - name: MONGO_INITDB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mongodb-secret + key: MONGO_INITDB_ROOT_PASSWORD + ports: + - containerPort: 27017 + name: mongo-port + volumeMounts: + - name: mongodb-storage + mountPath: /data/db + volumes: + - name: mongodb-storage + persistentVolumeClaim: + claimName: mongodb diff --git a/OpenLdapServer/ldap-deployment.yaml b/OpenLdapServer/ldap-deployment.yaml new file mode 100644 index 0000000..e02a917 --- /dev/null +++ b/OpenLdapServer/ldap-deployment.yaml @@ -0,0 +1,113 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ldap + namespace: ldap-server + labels: + app: ldap +spec: + selector: + matchLabels: + app: ldap + replicas: 1 + template: + metadata: + labels: + app: ldap + spec: + 
containers: + - name: ldap + image: osixia/openldap:1.4.0 + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "200m" + memory: "200Mi" + livenessProbe: + tcpSocket: + port: 389 + initialDelaySeconds: 15 + periodSeconds: 20 + volumeMounts: + - name: ldap-data + mountPath: /var/lib/ldap + readOnly: false + - name: ldap-config + mountPath: /etc/ldap/slapd.d + readOnly: false + - name: ldap-certs + mountPath: /container/service/slapd/assets/certs + readOnly: false + ports: + - containerPort: 389 + name: openldap + env: + - name: LDAP_LOG_LEVEL + value: "256" + - name: LDAP_ORGANISATION + value: "Example Inc." + - name: LDAP_DOMAIN + value: "example.org" + - name: LDAP_ADMIN_PASSWORD + value: "admin" + - name: LDAP_CONFIG_PASSWORD + value: "config" + - name: LDAP_READONLY_USER + value: "false" + - name: LDAP_READONLY_USER_USERNAME + value: "readonly" + - name: LDAP_READONLY_USER_PASSWORD + value: "readonly" + - name: LDAP_RFC2307BIS_SCHEMA + value: "false" + - name: LDAP_BACKEND + value: "mdb" + - name: LDAP_TLS + value: "true" + - name: LDAP_TLS_CRT_FILENAME + value: "ldap.crt" + - name: LDAP_TLS_KEY_FILENAME + value: "ldap.key" + - name: LDAP_TLS_DH_PARAM_FILENAME + value: "dhparam.pem" + - name: LDAP_TLS_CA_CRT_FILENAME + value: "ca.crt" + - name: LDAP_TLS_ENFORCE + value: "false" + - name: LDAP_TLS_CIPHER_SUITE + value: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC" + - name: LDAP_TLS_VERIFY_CLIENT + value: "demand" + - name: LDAP_REPLICATION + value: "false" + - name: LDAP_REPLICATION_CONFIG_SYNCPROV + value: "binddn=\"cn=admin,cn=config\" bindmethod=simple credentials=$LDAP_CONFIG_PASSWORD searchbase=\"cn=config\" type=refreshAndPersist retry=\"60 +\" timeout=1 starttls=critical" + - name: LDAP_REPLICATION_DB_SYNCPROV + value: "binddn=\"cn=admin,$LDAP_BASE_DN\" bindmethod=simple credentials=$LDAP_ADMIN_PASSWORD searchbase=\"$LDAP_BASE_DN\" type=refreshAndPersist interval=00:00:00:10 retry=\"60 +\" timeout=1 starttls=critical" + - name: LDAP_REPLICATION_HOSTS + value: "#PYTHON2BASH:['ldap://ldap-one-service', 'ldap://ldap-two-service']" + - name: KEEP_EXISTING_CONFIG + value: "false" + - name: LDAP_REMOVE_CONFIG_AFTER_SETUP + value: "true" + - name: LDAP_SSL_HELPER_PREFIX + value: "ldap" + volumes: + - name: ldap-data + persistentVolumeClaim: + claimName: ldapserver-db + # hostPath: + # path: "/data/ldap/db" + - name: ldap-config + persistentVolumeClaim: + claimName: ldapserver-config + #hostPath: + # path: "/data/ldap/config" + - name: ldap-certs + persistentVolumeClaim: + claimName: ldapserver-certs + + # hostPath: + # path: "/data/ldap/certs" diff --git a/OpenLdapServer/ldap-pvc.yaml b/OpenLdapServer/ldap-pvc.yaml new file mode 100644 index 0000000..a42849c --- /dev/null +++ b/OpenLdapServer/ldap-pvc.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-db + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 500Mi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-certs + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Mi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-config + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + 
accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Mi diff --git a/OpenLdapServer/ldap-service.yaml b/OpenLdapServer/ldap-service.yaml new file mode 100644 index 0000000..7047e67 --- /dev/null +++ b/OpenLdapServer/ldap-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: ldap + name: ldap-service + namespace: ldap-server +spec: + ports: + - port: 389 + selector: + app: ldap diff --git a/OpenLdapServer/namespace.yaml b/OpenLdapServer/namespace.yaml new file mode 100644 index 0000000..39be211 --- /dev/null +++ b/OpenLdapServer/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ldap-server diff --git a/Prometheus/.ingress.yaml.swp b/Prometheus/.ingress.yaml.swp new file mode 100644 index 0000000..6a2f7f8 --- /dev/null +++ b/Prometheus/.ingress.yaml.swp Binary files differ diff --git a/Prometheus/.namespace.yaml.swp b/Prometheus/.namespace.yaml.swp new file mode 100644 index 0000000..13bb0d6 --- /dev/null +++ b/Prometheus/.namespace.yaml.swp Binary files differ diff --git a/Prometheus/configmap.yaml b/Prometheus/configmap.yaml new file mode 100644 index 0000000..e3f8919 --- /dev/null +++ b/Prometheus/configmap.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-configmap +data: + prometheus.yml: | + # my global config + global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + + # Alertmanager configuration + alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + + # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. + rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + + # A scrape configuration containing exactly one endpoint to scrape: + # Here it's Prometheus itself. + scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'envoy_metrics' + # metrics_path defaults to '/metrics' + metrics_path: /stats/prometheus + # scheme defaults to 'http'. 
+ + static_configs: + - targets: ['192.168.68.90:11111'] diff --git a/Prometheus/deployment.yaml b/Prometheus/deployment.yaml new file mode 100644 index 0000000..5779ede --- /dev/null +++ b/Prometheus/deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus-deployment + labels: + app: prometheus +spec: + replicas: 1 + selector: + matchLabels: + app: prometheus + template: + metadata: + labels: + app: prometheus + spec: + containers: + - name: prometheus + image: prom/prometheus + args: + - '--web.external-url=/kubernetes/prometheus/' + - '--web.route-prefix=/kubernetes/prometheus/' + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/data' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + ports: + - name: prometheus-port + containerPort: 9090 + protocol: TCP + volumeMounts: + - name: prometheus-config + mountPath: /etc/prometheus/prometheus.yml + subPath: prometheus.yml + - name: prometheus-volume + mountPath: /data + volumes: + - name: prometheus-config + configMap: + name: prometheus-configmap + - name: prometheus-volume + persistentVolumeClaim: + claimName: pvc-prometheus diff --git a/AutoProvision/depoloyment.yaml b/AutoProvision/depoloyment.yaml new file mode 100644 index 0000000..dbfbc52 --- /dev/null +++ b/AutoProvision/depoloyment.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "nfs-provision" +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nfs-client-provisioner + namespace: nfs-provision + labels: + app: nfs-client-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: rkevin/nfs-subdir-external-provisioner:fix-k8s-1.20 + resources: + limits: + cpu: "50m" + memory: "50Mi" + requests: + cpu: "25m" + memory: "25Mi" + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: nfs-auto-provisioner + - name: NFS_SERVER + value: 192.168.68.103 + - name: NFS_PATH + value: /k8s-data + volumes: + - name: nfs-client-root + nfs: + server: 192.168.68.103 + path: /k8s-data diff --git a/AutoProvision/serviceaccount.yaml b/AutoProvision/serviceaccount.yaml new file mode 100644 index 0000000..a1f3895 --- /dev/null +++ b/AutoProvision/serviceaccount.yaml @@ -0,0 +1,63 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + namespace: nfs-provision + name: nfs-client-provisioner +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner + namespace: nfs-provision +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: nfs-provision +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role 
diff --git a/Prometheus/ingress.yaml b/Prometheus/ingress.yaml
new file mode 100644
index 0000000..e1f9bca
--- /dev/null
+++ b/Prometheus/ingress.yaml
@@ -0,0 +1,23 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.org/mergeable-ingress-type: "minion"
+    # nginx.ingress.kubernetes.io/rewrite-target: /$2
+  name: prometheus
+  labels:
+    app: prometheus
+spec:
+  rules:
+    - host: home.jerxie.com
+      http:
+        paths:
+          - backend:
+              service:
+                name: prometheus-service
+                port:
+                  number: 9090
+            path: /kubernetes/prometheus(/|$)(.*)
+            pathType: Exact
+
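Two pieces this Ingress depends on are absent from the diff: the prometheus-service it routes to, and a master Ingress for home.jerxie.com, without which the minion annotation here (and the matching one on Grafana/ingress.yaml) has nothing to merge into. A minimal sketch of the missing master, assuming the NGINX Inc. controller's mergeable-ingress feature (the resource name is hypothetical; a master declares only the host, minions contribute the paths). Note also that the regex capture path above would only ever match literally under pathType: Exact; controllers that honor regex paths generally expect pathType: ImplementationSpecific.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: home-master   # hypothetical name
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.org/mergeable-ingress-type: "master"
spec:
  rules:
    - host: home.jerxie.com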
+        pathType: ImplementationSpecific
+
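A note on the Ingress above: deployment.yaml already starts Prometheus with --web.route-prefix=/kubernetes/prometheus/, so the backend serves directly under the prefix and the commented-out rewrite-target is genuinely unneeded; a regex path also requires pathType ImplementationSpecific rather than Exact, which would only match the literal string. Separately, the kubernetes.io/ingress.class annotation is deprecated in networking.k8s.io/v1 in favor of spec.ingressClassName. A minimal sketch of the same route using the field and a plain prefix match instead of the regex, assuming an IngressClass named nginx exists in the cluster (keep the nginx.org mergeable-ingress annotation only if that master/minion feature is actually in use):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus
  labels:
    app: prometheus
spec:
  ingressClassName: nginx              # replaces the deprecated annotation
  rules:
  - host: home.jerxie.com
    http:
      paths:
      - path: /kubernetes/prometheus   # plain prefix; --web.route-prefix handles the rest
        pathType: Prefix
        backend:
          service:
            name: prometheus-service
            port:
              number: 9090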
diff --git a/Prometheus/kustomization.yaml b/Prometheus/kustomization.yaml
new file mode 100644
index 0000000..3282102
--- /dev/null
+++ b/Prometheus/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: monitoring
+resources:
+- ./namespace.yaml
+- ./pvc.yaml
+- ./service.yaml
+- ./configmap.yaml
+- ./deployment.yaml
+- ./ingress.yaml
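The kustomization above stamps namespace: monitoring onto every listed resource, so the whole Prometheus stack can be applied in one step with kubectl apply -k Prometheus/. For illustration only (Prometheus/service.yaml itself is not part of this diff, and the name prometheus-service is inferred from the Ingress backend), a resource rendered by kubectl kustomize Prometheus/ would come out with the namespace injected:

apiVersion: v1
kind: Service
metadata:
  name: prometheus-service   # assumed name, taken from the Ingress backend
  namespace: monitoring      # injected by the kustomization's namespace field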
diff --git a/Prometheus/namespace.yaml b/Prometheus/namespace.yaml
new file mode 100644
index 0000000..d325236
--- /dev/null
+++ b/Prometheus/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: monitoring
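Only the Prometheus kustomization lists namespace.yaml; the Grafana kustomization targets the same monitoring namespace without creating it, so applying Grafana first fails until the namespace exists. A sketch of one way to remove that ordering dependency, assuming this namespace.yaml is copied into the Grafana directory (applying the same Namespace from both overlays is harmless, since kubectl apply is idempotent):

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- ./namespace.yaml   # hypothetical copy of Prometheus/namespace.yaml
- ./pvc.yaml
- ./deployment.yaml
- ./service.yaml
- ./ingress.yaml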
diff --git a/Prometheus/pvc.yaml b/Prometheus/pvc.yaml
new file mode 100644
index 0000000..539425b
--- /dev/null
+++ b/Prometheus/pvc.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-prometheus
+  labels:
+    app: prometheus
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
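Like every other claim in this diff, pvc-prometheus selects the fast class (served by the NFS provisioner under AutoProvision/) through the volume.beta.kubernetes.io/storage-class annotation, which is long deprecated. The supported spelling is the spec.storageClassName field; a minimal equivalent sketch:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-prometheus
  labels:
    app: prometheus
spec:
  storageClassName: fast   # field form of the deprecated beta annotation
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi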
rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: nfs-provision +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/AutoProvision/storage_class.yaml b/AutoProvision/storage_class.yaml new file mode 100644 index 0000000..6c208b4 --- /dev/null +++ b/AutoProvision/storage_class.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: fast +provisioner: nfs-auto-provisioner +parameters: + archiveOnDelete: "false" diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml new file mode 100644 index 0000000..9c0ff6d --- /dev/null +++ b/Grafana/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-deployment + labels: + app: grafana +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana + imagePullPolicy: IfNotPresent + env: + - name: GF_SERVER_ROOT_URL + value: /kubernetes/grafana + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-config + volumes: + - name: grafana-config + persistentVolumeClaim: + claimName: pvc-grafana diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml new file mode 100644 index 0000000..4b039b8 --- /dev/null +++ b/Grafana/ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/mergeale-ingress-type: "minion" + #nignx.ingress.kubernetes.io/rewrite-target: /$2 + name: grafana + labels: + app: grafana +spec: + rules: + - host: home.jerxie.com + http: + paths: + - backend: + service: + name: grafana-service + port: + number: 3000 + path: /kubernetes/grafana(/|$)(.*) + pathType: Exact diff --git a/Grafana/kustomization.yaml b/Grafana/kustomization.yaml new file mode 100644 index 0000000..98f55aa --- /dev/null +++ b/Grafana/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: monitoring +resources: +- ./pvc.yaml +- ./deployment.yaml +- ./service.yaml +- ./ingress.yaml + diff --git a/Grafana/pvc.yaml b/Grafana/pvc.yaml new file mode 100644 index 0000000..054fd2b --- /dev/null +++ b/Grafana/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-grafana + labels: + app: grafana + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/Grafana/service.yaml b/Grafana/service.yaml new file mode 100644 index 0000000..75daf32 --- /dev/null +++ b/Grafana/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: grafana-service + labels: + app: grafana +spec: + ports: + - port: 3000 + 
diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml
new file mode 100644
index 0000000..9c0ff6d
--- /dev/null
+++ b/Grafana/deployment.yaml
@@ -0,0 +1,58 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: grafana-deployment
+  labels:
+    app: grafana
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: grafana
+  template:
+    metadata:
+      labels:
+        app: grafana
+    spec:
+      containers:
+      - name: grafana
+        image: grafana/grafana
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: GF_SERVER_ROOT_URL
+          value: /kubernetes/grafana
+        - name: GF_SERVER_SERVE_FROM_SUB_PATH
+          value: "true"
+        ports:
+        - containerPort: 3000
+          name: http-grafana
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /robots.txt
+            port: 3000
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 2
+        livenessProbe:
+          failureThreshold: 3
+          initialDelaySeconds: 30
+          periodSeconds: 10
+          successThreshold: 1
+          tcpSocket:
+            port: 3000
+          timeoutSeconds: 1
+        resources:
+          requests:
+            cpu: 250m
+            memory: 100Mi
+        volumeMounts:
+        - mountPath: /var/lib/grafana
+          name: grafana-config
+      volumes:
+      - name: grafana-config
+        persistentVolumeClaim:
+          claimName: pvc-grafana
diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml
new file mode 100644
index 0000000..4b039b8
--- /dev/null
+++ b/Grafana/ingress.yaml
@@ -0,0 +1,22 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.org/mergeable-ingress-type: "minion"
+    # nginx.ingress.kubernetes.io/rewrite-target: /$2
+  name: grafana
+  labels:
+    app: grafana
+spec:
+  rules:
+  - host: home.jerxie.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: grafana-service
+            port:
+              number: 3000
+        path: /kubernetes/grafana(/|$)(.*)
+        pathType: Exact
diff --git a/Grafana/kustomization.yaml b/Grafana/kustomization.yaml
new file mode 100644
index 0000000..98f55aa
--- /dev/null
+++ b/Grafana/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: monitoring
+resources:
+- ./pvc.yaml
+- ./deployment.yaml
+- ./service.yaml
+- ./ingress.yaml
+
diff --git a/Grafana/pvc.yaml b/Grafana/pvc.yaml
new file mode 100644
index 0000000..054fd2b
--- /dev/null
+++ b/Grafana/pvc.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-grafana
+  labels:
+    app: grafana
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/Grafana/service.yaml b/Grafana/service.yaml
new file mode 100644
index 0000000..75daf32
--- /dev/null
+++ b/Grafana/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: grafana-service
+  labels:
+    app: grafana
+spec:
+  ports:
+    - port: 3000
+      protocol: TCP
+  selector:
+    app: grafana
+  type: ClusterIP
+
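The `nginx.org/mergeable-ingress-type: "minion"` annotation on the Grafana and Prometheus Ingresses comes from the NGINX Inc. controller's mergeable-ingress feature, which expects a separate `master` Ingress to own the host entry. A sketch of such a master, assuming that controller (the `home-master` name is hypothetical):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: home-master                            # hypothetical name
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.org/mergeable-ingress-type: "master" # minions merge under this host
spec:
  rules:
  - host: home.jerxie.com                      # host shared with the minions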
diff --git a/Misc/rolebinding_axieyangb.yaml b/Misc/rolebinding_axieyangb.yaml
new file mode 100644
index 0000000..981ed08
--- /dev/null
+++ b/Misc/rolebinding_axieyangb.yaml
@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+# This cluster role binding grants the built-in "admin" ClusterRole
+# to the user axieyangb@gmail.com across all namespaces.
+kind: ClusterRoleBinding
+metadata:
+  name: axieyangb-admin-role
+subjects:
+- kind: User
+  name: axieyangb@gmail.com # Name is case sensitive
+  apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: admin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/MongoDb/deployment.yaml b/MongoDb/deployment.yaml
new file mode 100644
index 0000000..f7c330a
--- /dev/null
+++ b/MongoDb/deployment.yaml
@@ -0,0 +1,87 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mongodb
+  namespace: database
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mongodb
+  namespace: database
+spec:
+  ports:
+  - port: 27017
+  selector:
+    app: mongodb
+  type: NodePort
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mongodb-secret
+  namespace: database
+type: Opaque
+data:
+  MONGO_INITDB_ROOT_USERNAME: cm9vdA==
+  MONGO_INITDB_ROOT_PASSWORD: UGE3NTIzdzByZA==
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mongodb
+  namespace: database
+spec:
+  selector:
+    matchLabels:
+      app: mongodb
+  template:
+    metadata:
+      labels:
+        app: mongodb
+    spec:
+      containers:
+      - image: mongo
+        name: mongodb
+        livenessProbe:
+          tcpSocket:
+            port: 27017
+          initialDelaySeconds: 15
+          periodSeconds: 20
+        resources:
+          limits:
+            memory: "800Mi"
+            cpu: "500m"
+          requests:
+            memory: "400Mi"
+            cpu: "250m"
+        env:
+        - name: MONGO_INITDB_ROOT_USERNAME
+          valueFrom:
+            secretKeyRef:
+              name: mongodb-secret
+              key: MONGO_INITDB_ROOT_USERNAME
+        - name: MONGO_INITDB_ROOT_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: mongodb-secret
+              key: MONGO_INITDB_ROOT_PASSWORD
+        ports:
+        - containerPort: 27017
+          name: mongo-port
+        volumeMounts:
+        - name: mongodb-storage
+          mountPath: /data/db
+      volumes:
+      - name: mongodb-storage
+        persistentVolumeClaim:
+          claimName: mongodb
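The Secret above ships base64-encoded credentials in `data`. Kubernetes also accepts plaintext under `stringData`, which the API server encodes on write; a sketch with placeholder values (do not commit real credentials):

apiVersion: v1
kind: Secret
metadata:
  name: mongodb-secret
  namespace: database
type: Opaque
stringData:                              # plaintext here; stored base64-encoded
  MONGO_INITDB_ROOT_USERNAME: root       # placeholder value
  MONGO_INITDB_ROOT_PASSWORD: change-me  # placeholder value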
diff --git a/OpenLdapServer/ldap-deployment.yaml b/OpenLdapServer/ldap-deployment.yaml
new file mode 100644
index 0000000..e02a917
--- /dev/null
+++ b/OpenLdapServer/ldap-deployment.yaml
@@ -0,0 +1,113 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ldap
+  namespace: ldap-server
+  labels:
+    app: ldap
+spec:
+  selector:
+    matchLabels:
+      app: ldap
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ldap
+    spec:
+      containers:
+      - name: ldap
+        image: osixia/openldap:1.4.0
+        resources:
+          requests:
+            cpu: "100m"
+            memory: "100Mi"
+          limits:
+            cpu: "200m"
+            memory: "200Mi"
+        livenessProbe:
+          tcpSocket:
+            port: 389
+          initialDelaySeconds: 15
+          periodSeconds: 20
+        volumeMounts:
+        - name: ldap-data
+          mountPath: /var/lib/ldap
+          readOnly: false
+        - name: ldap-config
+          mountPath: /etc/ldap/slapd.d
+          readOnly: false
+        - name: ldap-certs
+          mountPath: /container/service/slapd/assets/certs
+          readOnly: false
+        ports:
+        - containerPort: 389
+          name: openldap
+        env:
+        - name: LDAP_LOG_LEVEL
+          value: "256"
+        - name: LDAP_ORGANISATION
+          value: "Example Inc."
+        - name: LDAP_DOMAIN
+          value: "example.org"
+        - name: LDAP_ADMIN_PASSWORD
+          value: "admin"
+        - name: LDAP_CONFIG_PASSWORD
+          value: "config"
+        - name: LDAP_READONLY_USER
+          value: "false"
+        - name: LDAP_READONLY_USER_USERNAME
+          value: "readonly"
+        - name: LDAP_READONLY_USER_PASSWORD
+          value: "readonly"
+        - name: LDAP_RFC2307BIS_SCHEMA
+          value: "false"
+        - name: LDAP_BACKEND
+          value: "mdb"
+        - name: LDAP_TLS
+          value: "true"
+        - name: LDAP_TLS_CRT_FILENAME
+          value: "ldap.crt"
+        - name: LDAP_TLS_KEY_FILENAME
+          value: "ldap.key"
+        - name: LDAP_TLS_DH_PARAM_FILENAME
+          value: "dhparam.pem"
+        - name: LDAP_TLS_CA_CRT_FILENAME
+          value: "ca.crt"
+        - name: LDAP_TLS_ENFORCE
+          value: "false"
+        - name: LDAP_TLS_CIPHER_SUITE
+          value: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
+        - name: LDAP_TLS_VERIFY_CLIENT
+          value: "demand"
+        - name: LDAP_REPLICATION
+          value: "false"
+        - name: LDAP_REPLICATION_CONFIG_SYNCPROV
+          value: "binddn=\"cn=admin,cn=config\" bindmethod=simple credentials=$LDAP_CONFIG_PASSWORD searchbase=\"cn=config\" type=refreshAndPersist retry=\"60 +\" timeout=1 starttls=critical"
+        - name: LDAP_REPLICATION_DB_SYNCPROV
+          value: "binddn=\"cn=admin,$LDAP_BASE_DN\" bindmethod=simple credentials=$LDAP_ADMIN_PASSWORD searchbase=\"$LDAP_BASE_DN\" type=refreshAndPersist interval=00:00:00:10 retry=\"60 +\" timeout=1 starttls=critical"
+        - name: LDAP_REPLICATION_HOSTS
+          value: "#PYTHON2BASH:['ldap://ldap-one-service', 'ldap://ldap-two-service']"
+        - name: KEEP_EXISTING_CONFIG
+          value: "false"
+        - name: LDAP_REMOVE_CONFIG_AFTER_SETUP
+          value: "true"
+        - name: LDAP_SSL_HELPER_PREFIX
+          value: "ldap"
+      volumes:
+      - name: ldap-data
+        persistentVolumeClaim:
+          claimName: ldapserver-db
+        # hostPath:
+        #   path: "/data/ldap/db"
+      - name: ldap-config
+        persistentVolumeClaim:
+          claimName: ldapserver-config
+        # hostPath:
+        #   path: "/data/ldap/config"
+      - name: ldap-certs
+        persistentVolumeClaim:
+          claimName: ldapserver-certs
+
+        # hostPath:
+        #   path: "/data/ldap/certs"
diff --git a/OpenLdapServer/ldap-pvc.yaml b/OpenLdapServer/ldap-pvc.yaml
new file mode 100644
index 0000000..a42849c
--- /dev/null
+++ b/OpenLdapServer/ldap-pvc.yaml
@@ -0,0 +1,42 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-db
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 500Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-certs
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ldapserver-config
+  namespace: ldap-server
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
diff --git a/OpenLdapServer/ldap-service.yaml b/OpenLdapServer/ldap-service.yaml
new file mode 100644
index 0000000..7047e67
--- /dev/null
+++ b/OpenLdapServer/ldap-service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: ldap
+  name: ldap-service
+  namespace: ldap-server
+spec:
+  ports:
+  - port: 389
+  selector:
+    app: ldap
diff --git a/OpenLdapServer/namespace.yaml b/OpenLdapServer/namespace.yaml
new file mode 100644
index 0000000..39be211
--- /dev/null
+++ b/OpenLdapServer/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ldap-server
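The LDAP container in OpenLdapServer/ldap-deployment.yaml defines a livenessProbe but no readinessProbe, so ldap-service can route traffic to a pod whose slapd is still initializing. A minimal readiness check on the same port could look like this (delay and period values are illustrative):

readinessProbe:
  tcpSocket:
    port: 389                # same port the Service targets
  initialDelaySeconds: 10    # illustrative timing
  periodSeconds: 10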
diff --git a/Prometheus/.ingress.yaml.swp b/Prometheus/.ingress.yaml.swp
new file mode 100644
index 0000000..6a2f7f8
--- /dev/null
+++ b/Prometheus/.ingress.yaml.swp
Binary files differ
diff --git a/Prometheus/.namespace.yaml.swp b/Prometheus/.namespace.yaml.swp
new file mode 100644
index 0000000..13bb0d6
--- /dev/null
+++ b/Prometheus/.namespace.yaml.swp
Binary files differ
diff --git a/Prometheus/configmap.yaml b/Prometheus/configmap.yaml
new file mode 100644
index 0000000..e3f8919
--- /dev/null
+++ b/Prometheus/configmap.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: prometheus-configmap
+data:
+  prometheus.yml: |
+    # my global config
+    global:
+      scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+      evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+      # scrape_timeout is set to the global default (10s).
+
+    # Alertmanager configuration
+    alerting:
+      alertmanagers:
+      - static_configs:
+        - targets:
+          # - alertmanager:9093
+
+    # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+    rule_files:
+      # - "first_rules.yml"
+      # - "second_rules.yml"
+
+    # A scrape configuration containing exactly one endpoint to scrape:
+    # Here it's Prometheus itself.
+    scrape_configs:
+      # The job name is added as a label `job=` to any timeseries scraped from this config.
+      - job_name: 'envoy_metrics'
+        # metrics_path defaults to '/metrics'
+        metrics_path: /stats/prometheus
+        # scheme defaults to 'http'.
+
+        static_configs:
+        - targets: ['192.168.68.90:11111']
diff --git a/Prometheus/deployment.yaml b/Prometheus/deployment.yaml
new file mode 100644
index 0000000..5779ede
--- /dev/null
+++ b/Prometheus/deployment.yaml
@@ -0,0 +1,42 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: prometheus-deployment
+  labels:
+    app: prometheus
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: prometheus
+  template:
+    metadata:
+      labels:
+        app: prometheus
+    spec:
+      containers:
+      - name: prometheus
+        image: prom/prometheus
+        args:
+        - '--web.external-url=/kubernetes/prometheus/'
+        - '--web.route-prefix=/kubernetes/prometheus/'
+        - '--config.file=/etc/prometheus/prometheus.yml'
+        - '--storage.tsdb.path=/data'
+        - '--web.console.libraries=/usr/share/prometheus/console_libraries'
+        ports:
+        - name: prometheus-port
+          containerPort: 9090
+          protocol: TCP
+        volumeMounts:
+        - name: prometheus-config
+          mountPath: /etc/prometheus/prometheus.yml
+          subPath: prometheus.yml
+        - name: prometheus-volume
+          mountPath: /data
+      volumes:
+      - name: prometheus-config
+        configMap:
+          name: prometheus-configmap
+      - name: prometheus-volume
+        persistentVolumeClaim:
+          claimName: pvc-prometheus
diff --git a/Prometheus/ingress.yaml b/Prometheus/ingress.yaml
new file mode 100644
index 0000000..e1f9bca
--- /dev/null
+++ b/Prometheus/ingress.yaml
@@ -0,0 +1,23 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.org/mergeable-ingress-type: "minion"
+    # nginx.ingress.kubernetes.io/rewrite-target: /$2
+  name: prometheus
+  labels:
+    app: prometheus
+spec:
+  rules:
+  - host: home.jerxie.com
+    http:
+      paths:
+      - backend:
+          service:
+            name: prometheus-service
+            port:
+              number: 9090
+        path: /kubernetes/prometheus(/|$)(.*)
+        pathType: Exact
+
diff --git a/Prometheus/kustomization.yaml b/Prometheus/kustomization.yaml
new file mode 100644
index 0000000..3282102
--- /dev/null
+++ b/Prometheus/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: monitoring
+resources:
+- ./namespace.yaml
+- ./pvc.yaml
+- ./service.yaml
+- ./configmap.yaml
+- ./deployment.yaml
+- ./ingress.yaml
diff --git a/Prometheus/namespace.yaml b/Prometheus/namespace.yaml
new file mode 100644
index 0000000..d325236
--- /dev/null
+++ b/Prometheus/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: monitoring
diff --git a/Prometheus/pvc.yaml b/Prometheus/pvc.yaml
new file mode 100644
index 0000000..539425b
--- /dev/null
+++ b/Prometheus/pvc.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-prometheus
+  labels:
+    app: prometheus
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "fast"
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/Prometheus/service.yaml b/Prometheus/service.yaml
new file mode 100644
index 0000000..2b7ec5f
--- /dev/null
+++ b/Prometheus/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: prometheus-service
+  labels:
+    app: prometheus
+spec:
+  ports:
+  - port: 9090
+  selector:
+    app: prometheus
+  type: ClusterIP
+
+
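The scrape configuration in Prometheus/configmap.yaml above only collects the Envoy endpoint; the server does not scrape itself. Because it runs with `--web.route-prefix=/kubernetes/prometheus/`, its own metrics are served under that prefix, so a self-scrape job would look roughly like this (job name is illustrative):

- job_name: 'prometheus'                        # illustrative job name
  metrics_path: /kubernetes/prometheus/metrics  # route prefix applies to /metrics
  static_configs:
  - targets: ['localhost:9090']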
diff --git a/Selenium/kustomization.yaml b/Selenium/kustomization.yaml
new file mode 100644
index 0000000..0262a4e
--- /dev/null
+++ b/Selenium/kustomization.yaml
@@ -0,0 +1,8 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: selenium
+resources:
+- ./namespace.yaml
+- ./selenium-hub-deployment.yaml
+- ./selenium-hub-service.yaml
+- ./selenium-node-chrome-deployment.yaml
diff --git a/Selenium/namespace.yaml b/Selenium/namespace.yaml
new file mode 100644
index 0000000..ed98245
--- /dev/null
+++ b/Selenium/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: selenium
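With per-app kustomizations in place, each directory can be applied on its own (for example `kubectl apply -k Grafana/`), or aggregated from a root kustomization. A sketch of such a root file, assuming it sits at the repository root and covers only the directories that already have a kustomization.yaml:

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./Grafana
- ./Prometheus
- ./Selenium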
["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: nfs-provision +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: nfs-provision +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: nfs-provision +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + # replace with namespace where provisioner is deployed + namespace: nfs-provision +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/AutoProvision/storage_class.yaml b/AutoProvision/storage_class.yaml new file mode 100644 index 0000000..6c208b4 --- /dev/null +++ b/AutoProvision/storage_class.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: fast +provisioner: nfs-auto-provisioner +parameters: + archiveOnDelete: "false" diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml new file mode 100644 index 0000000..9c0ff6d --- /dev/null +++ b/Grafana/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-deployment + labels: + app: grafana +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana + imagePullPolicy: IfNotPresent + env: + - name: GF_SERVER_ROOT_URL + value: /kubernetes/grafana + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-config + volumes: + - name: grafana-config + persistentVolumeClaim: + claimName: pvc-grafana diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml new file mode 100644 index 0000000..4b039b8 --- /dev/null +++ b/Grafana/ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/mergeale-ingress-type: "minion" + #nignx.ingress.kubernetes.io/rewrite-target: /$2 + name: grafana + labels: + app: grafana +spec: + rules: + - host: home.jerxie.com + http: + paths: + - backend: + service: + name: grafana-service + port: + number: 3000 + path: /kubernetes/grafana(/|$)(.*) + pathType: Exact diff --git a/Grafana/kustomization.yaml 
b/Grafana/kustomization.yaml new file mode 100644 index 0000000..98f55aa --- /dev/null +++ b/Grafana/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: monitoring +resources: +- ./pvc.yaml +- ./deployment.yaml +- ./service.yaml +- ./ingress.yaml + diff --git a/Grafana/pvc.yaml b/Grafana/pvc.yaml new file mode 100644 index 0000000..054fd2b --- /dev/null +++ b/Grafana/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-grafana + labels: + app: grafana + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/Grafana/service.yaml b/Grafana/service.yaml new file mode 100644 index 0000000..75daf32 --- /dev/null +++ b/Grafana/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: grafana-service + labels: + app: grafana +spec: + ports: + - port: 3000 + protocol: TCP + selector: + app: grafana + type: ClusterIP + diff --git a/Misc/rolebinding_axieyangb.yaml b/Misc/rolebinding_axieyangb.yaml new file mode 100644 index 0000000..981ed08 --- /dev/null +++ b/Misc/rolebinding_axieyangb.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +# This role binding allows "dave" to read secrets in the "development" namespace. +# You need to already have a ClusterRole named "secret-reader". +kind: ClusterRoleBinding +metadata: + name: axieyangb-admin-role +subjects: +- kind: User + name: axieyangb@gmail.com # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: admin + apiGroup: rbac.authorization.k8s.io diff --git a/MongoDb/deployment.yaml b/MongoDb/deployment.yaml new file mode 100644 index 0000000..f7c330a --- /dev/null +++ b/MongoDb/deployment.yaml @@ -0,0 +1,87 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mongodb + namespace: database + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 50Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: mongodb + namespace: database +spec: + ports: + - port: 27017 + selector: + app: mongodb + type: NodePort +--- +apiVersion: v1 +kind: Secret +metadata: + name: mongodb-secret + namespace: database +type: Opaque +data: + MONGO_INITDB_ROOT_USERNAME: cm9vdA== + MONGO_INITDB_ROOT_PASSWORD: UGE3NTIzdzByZA== +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongodb + namespace: database +spec: + selector: + matchLabels: + app: mongodb + template: + metadata: + labels: + app: mongodb + spec: + containers: + - image: mongo + name: mongodb + livenessProbe: + tcpSocket: + port: 27017 + initialDelaySeconds: 15 + periodSeconds: 20 + resources: + limits: + memory: "800Mi" + cpu: "500m" + requests: + memory: "400Mi" + cpu: "250m" + env: + - name: MONGO_INITDB_ROOT_USERNAME + valueFrom: + secretKeyRef: + name: mongodb-secret + key: MONGO_INITDB_ROOT_USERNAME + - name: MONGO_INITDB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mongodb-secret + key: MONGO_INITDB_ROOT_PASSWORD + ports: + - containerPort: 27017 + name: mongo-port + volumeMounts: + - name: mongodb-storage + mountPath: /data/db + volumes: + - name: mongodb-storage + persistentVolumeClaim: + claimName: mongodb diff --git a/OpenLdapServer/ldap-deployment.yaml b/OpenLdapServer/ldap-deployment.yaml new file mode 100644 index 0000000..e02a917 --- /dev/null +++ b/OpenLdapServer/ldap-deployment.yaml @@ -0,0 +1,113 
@@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ldap + namespace: ldap-server + labels: + app: ldap +spec: + selector: + matchLabels: + app: ldap + replicas: 1 + template: + metadata: + labels: + app: ldap + spec: + containers: + - name: ldap + image: osixia/openldap:1.4.0 + resources: + requests: + cpu: "100m" + memory: "100Mi" + limits: + cpu: "200m" + memory: "200Mi" + livenessProbe: + tcpSocket: + port: 389 + initialDelaySeconds: 15 + periodSeconds: 20 + volumeMounts: + - name: ldap-data + mountPath: /var/lib/ldap + readOnly: false + - name: ldap-config + mountPath: /etc/ldap/slapd.d + readOnly: false + - name: ldap-certs + mountPath: /container/service/slapd/assets/certs + readOnly: false + ports: + - containerPort: 389 + name: openldap + env: + - name: LDAP_LOG_LEVEL + value: "256" + - name: LDAP_ORGANISATION + value: "Example Inc." + - name: LDAP_DOMAIN + value: "example.org" + - name: LDAP_ADMIN_PASSWORD + value: "admin" + - name: LDAP_CONFIG_PASSWORD + value: "config" + - name: LDAP_READONLY_USER + value: "false" + - name: LDAP_READONLY_USER_USERNAME + value: "readonly" + - name: LDAP_READONLY_USER_PASSWORD + value: "readonly" + - name: LDAP_RFC2307BIS_SCHEMA + value: "false" + - name: LDAP_BACKEND + value: "mdb" + - name: LDAP_TLS + value: "true" + - name: LDAP_TLS_CRT_FILENAME + value: "ldap.crt" + - name: LDAP_TLS_KEY_FILENAME + value: "ldap.key" + - name: LDAP_TLS_DH_PARAM_FILENAME + value: "dhparam.pem" + - name: LDAP_TLS_CA_CRT_FILENAME + value: "ca.crt" + - name: LDAP_TLS_ENFORCE + value: "false" + - name: LDAP_TLS_CIPHER_SUITE + value: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC" + - name: LDAP_TLS_VERIFY_CLIENT + value: "demand" + - name: LDAP_REPLICATION + value: "false" + - name: LDAP_REPLICATION_CONFIG_SYNCPROV + value: "binddn=\"cn=admin,cn=config\" bindmethod=simple credentials=$LDAP_CONFIG_PASSWORD searchbase=\"cn=config\" type=refreshAndPersist retry=\"60 +\" timeout=1 starttls=critical" + - name: LDAP_REPLICATION_DB_SYNCPROV + value: "binddn=\"cn=admin,$LDAP_BASE_DN\" bindmethod=simple credentials=$LDAP_ADMIN_PASSWORD searchbase=\"$LDAP_BASE_DN\" type=refreshAndPersist interval=00:00:00:10 retry=\"60 +\" timeout=1 starttls=critical" + - name: LDAP_REPLICATION_HOSTS + value: "#PYTHON2BASH:['ldap://ldap-one-service', 'ldap://ldap-two-service']" + - name: KEEP_EXISTING_CONFIG + value: "false" + - name: LDAP_REMOVE_CONFIG_AFTER_SETUP + value: "true" + - name: LDAP_SSL_HELPER_PREFIX + value: "ldap" + volumes: + - name: ldap-data + persistentVolumeClaim: + claimName: ldapserver-db + # hostPath: + # path: "/data/ldap/db" + - name: ldap-config + persistentVolumeClaim: + claimName: ldapserver-config + #hostPath: + # path: "/data/ldap/config" + - name: ldap-certs + persistentVolumeClaim: + claimName: ldapserver-certs + + # hostPath: + # path: "/data/ldap/certs" diff --git a/OpenLdapServer/ldap-pvc.yaml b/OpenLdapServer/ldap-pvc.yaml new file mode 100644 index 0000000..a42849c --- /dev/null +++ b/OpenLdapServer/ldap-pvc.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-db + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 500Mi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-certs + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + 
resources: + requests: + storage: 5Mi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ldapserver-config + namespace: ldap-server + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Mi diff --git a/OpenLdapServer/ldap-service.yaml b/OpenLdapServer/ldap-service.yaml new file mode 100644 index 0000000..7047e67 --- /dev/null +++ b/OpenLdapServer/ldap-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: ldap + name: ldap-service + namespace: ldap-server +spec: + ports: + - port: 389 + selector: + app: ldap diff --git a/OpenLdapServer/namespace.yaml b/OpenLdapServer/namespace.yaml new file mode 100644 index 0000000..39be211 --- /dev/null +++ b/OpenLdapServer/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ldap-server diff --git a/Prometheus/.ingress.yaml.swp b/Prometheus/.ingress.yaml.swp new file mode 100644 index 0000000..6a2f7f8 --- /dev/null +++ b/Prometheus/.ingress.yaml.swp Binary files differ diff --git a/Prometheus/.namespace.yaml.swp b/Prometheus/.namespace.yaml.swp new file mode 100644 index 0000000..13bb0d6 --- /dev/null +++ b/Prometheus/.namespace.yaml.swp Binary files differ diff --git a/Prometheus/configmap.yaml b/Prometheus/configmap.yaml new file mode 100644 index 0000000..e3f8919 --- /dev/null +++ b/Prometheus/configmap.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-configmap +data: + prometheus.yml: | + # my global config + global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + + # Alertmanager configuration + alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + + # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. + rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + + # A scrape configuration containing exactly one endpoint to scrape: + # here it is the Prometheus endpoint of an Envoy admin interface. + scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'envoy_metrics' + # metrics_path defaults to '/metrics' + metrics_path: /stats/prometheus + # scheme defaults to 'http'. + + static_configs: + - targets: ['192.168.68.90:11111']
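The single scrape job above only collects Envoy statistics. If Prometheus should also scrape itself, a second job can be appended under the same scrape_configs key of prometheus.yml. A minimal sketch (not part of the diff), assuming the --web.route-prefix=/kubernetes/prometheus/ flag passed in the deployment below, which relocates the metrics endpoint under that prefix:

      - job_name: 'prometheus'
        # route-prefix moves /metrics under the configured prefix
        metrics_path: /kubernetes/prometheus/metrics
        static_configs:
          - targets: ['localhost:9090']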
diff --git a/Prometheus/deployment.yaml b/Prometheus/deployment.yaml new file mode 100644 index 0000000..5779ede --- /dev/null +++ b/Prometheus/deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus-deployment + labels: + app: prometheus +spec: + replicas: 1 + selector: + matchLabels: + app: prometheus + template: + metadata: + labels: + app: prometheus + spec: + containers: + - name: prometheus + image: prom/prometheus + args: + - '--web.external-url=/kubernetes/prometheus/' + - '--web.route-prefix=/kubernetes/prometheus/' + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/data' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + ports: + - name: prometheus-port + containerPort: 9090 + protocol: TCP + volumeMounts: + - name: prometheus-config + mountPath: /etc/prometheus/prometheus.yml + subPath: prometheus.yml + - name: prometheus-volume + mountPath: /data + volumes: + - name: prometheus-config + configMap: + name: prometheus-configmap + - name: prometheus-volume + persistentVolumeClaim: + claimName: pvc-prometheus diff --git a/Prometheus/ingress.yaml b/Prometheus/ingress.yaml new file mode 100644 index 0000000..e1f9bca --- /dev/null +++ b/Prometheus/ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/mergeable-ingress-type: "minion" + name: prometheus + labels: + app: prometheus +spec: + rules: + - host: home.jerxie.com + http: + paths: + - backend: + service: + name: prometheus-service + port: + number: 9090 + path: /kubernetes/prometheus + pathType: Prefix + diff --git a/Prometheus/kustomization.yaml b/Prometheus/kustomization.yaml new file mode 100644 index 0000000..3282102 --- /dev/null +++ b/Prometheus/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: monitoring +resources: +- ./namespace.yaml +- ./pvc.yaml +- ./service.yaml +- ./configmap.yaml +- ./deployment.yaml +- ./ingress.yaml diff --git a/Prometheus/namespace.yaml b/Prometheus/namespace.yaml new file mode 100644 index 0000000..d325236 --- /dev/null +++ b/Prometheus/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: monitoring diff --git a/Prometheus/pvc.yaml b/Prometheus/pvc.yaml new file mode 100644 index 0000000..539425b --- /dev/null +++ b/Prometheus/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-prometheus + labels: + app: prometheus + annotations: + volume.beta.kubernetes.io/storage-class: "fast" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/Prometheus/service.yaml b/Prometheus/service.yaml new file mode 100644 index 0000000..2b7ec5f --- /dev/null +++ b/Prometheus/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: prometheus-service + labels: + app: prometheus +spec: + ports: + - port: 9090 + selector: + app: prometheus + type: ClusterIP + + 
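The Prometheus ingress above and the Grafana ingress later in this diff are both annotated as mergeable "minions", a pattern from the NGINX Inc. ingress controller in which one "master" ingress owns the shared host and each minion contributes its own paths. No master appears in this diff; a minimal sketch of one, assuming the hypothetical name home-master and the monitoring namespace (both assumptions, not taken from this repo):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: home-master              # hypothetical name, not in this repo
  namespace: monitoring          # assumption; place wherever the master should live
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.org/mergeable-ingress-type: "master"
spec:
  rules:
  - host: home.jerxie.com        # must match the host of the minion ingresses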
diff --git a/Selenium/kustomization.yaml b/Selenium/kustomization.yaml new file mode 100644 index 0000000..0262a4e --- /dev/null +++ b/Selenium/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: selenium +resources: +- ./namespace.yaml +- ./selenium-hub-deployment.yaml +- ./selenium-hub-service.yaml +- ./selenium-node-chrome-deployment.yaml diff --git a/Selenium/namespace.yaml b/Selenium/namespace.yaml new file mode 100644 index 0000000..ed98245 --- /dev/null +++ b/Selenium/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: selenium diff --git a/Selenium/selenium-hub-deployment.yaml b/Selenium/selenium-hub-deployment.yaml new file mode 100644 index 0000000..2b0fcf8 --- /dev/null +++ b/Selenium/selenium-hub-deployment.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: selenium-hub + labels: + app: selenium-hub +spec: + replicas: 1 + selector: + matchLabels: + app: selenium-hub + template: + metadata: + labels: + app: selenium-hub + spec: + containers: + - name: selenium-hub + image: selenium/hub:latest + ports: + - containerPort: 4444 + resources: + limits: + memory: 200Mi + cpu: ".3" + livenessProbe: + httpGet: + path: /wd/hub/status + port: 4444 + initialDelaySeconds: 10 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /wd/hub/status + port: 4444 + initialDelaySeconds: 10 + timeoutSeconds: 5
diff --git a/AutoProvision/storage_class.yaml b/AutoProvision/storage_class.yaml new file mode 100644 index 0000000..6c208b4 --- /dev/null +++ b/AutoProvision/storage_class.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: fast +provisioner: nfs-auto-provisioner +parameters: + archiveOnDelete: "false" diff --git a/Grafana/deployment.yaml b/Grafana/deployment.yaml new file mode 100644 index 0000000..9c0ff6d --- /dev/null +++ b/Grafana/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana-deployment + labels: + app: grafana +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana + imagePullPolicy: IfNotPresent + env: + - name: GF_SERVER_ROOT_URL + value: /kubernetes/grafana + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /robots.txt + port: 3000 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-config + volumes: + - name: grafana-config + persistentVolumeClaim: + claimName: pvc-grafana diff --git a/Grafana/ingress.yaml b/Grafana/ingress.yaml new file mode 100644 index 0000000..4b039b8 --- /dev/null +++ b/Grafana/ingress.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.org/mergeable-ingress-type: "minion" + name: grafana + labels: + app: grafana +spec: + rules: + - host: home.jerxie.com + http: + paths: + - backend: + service: + name: grafana-service + port: + number: 3000 + path: /kubernetes/grafana + pathType: Prefix
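Every claim in this repo selects the fast class through the legacy volume.beta.kubernetes.io/storage-class annotation. On current Kubernetes the same request is expressed with the spec.storageClassName field; a sketch of the Grafana claim in that form:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-grafana
  labels:
    app: grafana
spec:
  storageClassName: fast         # replaces the beta annotation
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi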
diff --git a/Selenium/selenium-hub-service.yaml b/Selenium/selenium-hub-service.yaml new file mode 100644 index 0000000..c2ff064 --- /dev/null +++ b/Selenium/selenium-hub-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: selenium-hub + labels: + app: selenium-hub +spec: + ports: + - port: 4444 + targetPort: 4444 + name: hub-port + selector: + app: selenium-hub + type: ClusterIP diff --git a/Selenium/selenium-node-chrome-deployment.yaml b/Selenium/selenium-node-chrome-deployment.yaml new file mode 100644 index 0000000..0d85d4e --- /dev/null +++ b/Selenium/selenium-node-chrome-deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: selenium-node-chrome + labels: + app: selenium-node-chrome +spec: + replicas: 2 + selector: + matchLabels: + app: selenium-node-chrome + template: + 
metadata: + labels: + app: selenium-node-chrome + spec: + volumes: + - name: dshm + emptyDir: + medium: Memory + containers: + - name: selenium-node-chrome + image: selenium/node-chrome-debug:latest + ports: + - containerPort: 5555 + volumeMounts: + - mountPath: /dev/shm + name: dshm + env: + - name: HUB_HOST + value: "selenium-hub" + - name: HUB_PORT + value: "4444" + resources: + limits: + memory: "300Mi" + cpu: ".4" +
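Once the hub, its service, and the Chrome nodes are applied, the grid can be smoke-tested from inside the cluster by hitting the same /wd/hub/status endpoint the probes use, through the selenium-hub service. A minimal sketch of a throwaway pod, assuming the publicly available curlimages/curl image (the pod name is hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: selenium-smoke-test        # hypothetical one-off pod
  namespace: selenium
spec:
  restartPolicy: Never
  containers:
  - name: check
    image: curlimages/curl:latest  # assumption: any image with curl works
    # -sf: silent, and exit non-zero on an HTTP error status
    args: ["-sf", "http://selenium-hub:4444/wd/hub/status"]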