Initial k8s version (storm)

lixikang
2020-05-25 16:48:37 +08:00
parent 0d1396f415
commit fc431fa3bd
53 changed files with 2660 additions and 0 deletions

5
zk-kafka/check.sh Executable file

@@ -0,0 +1,5 @@
#!/bin/bash
# Print the ZooKeeper role (leader/follower) of each of the three zk pods.
for i in 0 1 2; do
  kubectl exec zk-$i -c kubernetes-zookeeper -- zkServer.sh status
done
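
A small convenience sketch for using the script above: wait for the ZooKeeper StatefulSet (named zk further below) to report all replicas ready, then print each node's role.

kubectl rollout status statefulset/zk --timeout=300s
./zk-kafka/check.sh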

13
zk-kafka/host.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
# Derive the advertised Kafka listener port from the StatefulSet ordinal in the hostname.
HOSTNAME=$(hostname -s)
if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
  ORD=${BASH_REMATCH[2]}
  PORT=$((ORD + 9092))
  # 12.345.67.8 is the load balancer's IP
  export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.40.127:$PORT"
else
  echo "Failed to get index from hostname $HOSTNAME"
  exit 1
fi
echo "$KAFKA_CFG_ADVERTISED_LISTENERS"
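
For clarity, the ordinal-to-port mapping the script produces is kafka-0 → 9092, kafka-1 → 9093, kafka-2 → 9094, all advertised on the load-balancer address. A standalone illustration of the same regex and arithmetic (no cluster required):

# Illustration only: reuse host.sh's pattern against sample StatefulSet hostnames.
for h in kafka-0 kafka-1 kafka-2; do
  [[ $h =~ (.*)-([0-9]+)$ ]] && echo "$h -> PLAINTEXT://192.168.40.127:$(( BASH_REMATCH[2] + 9092 ))"
done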


@@ -0,0 +1,98 @@
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
spec:
  ports:
  - port: 9093
    targetPort: 9093
    name: server
    protocol: TCP
    nodePort: 9093  # outside the default 30000-32767 range; the apiserver's --service-node-port-range must include it
  type: NodePort
  # no selector is defined, so Kubernetes will not populate endpoints for this Service automatically
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  serviceName: kafka-svc
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      hostAliases:
      - ip: "192.168.40.127"
        hostnames:
        - "bigdata-127"
      - ip: "192.168.40.151"
        hostnames:
        - "bigdata-151"
      - ip: "192.168.40.152"
        hostnames:
        - "bigdata-152"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - kafka
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: "app"
                  operator: In
                  values:
                  - zk
              topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
      - name: k8skafka
        securityContext:
          runAsUser: 0
        imagePullPolicy: Always
        image: 192.168.40.153:9080/k8s/kafka:test3
        resources:
          requests:
            memory: "1Gi"
            cpu: 500m
        ports:
        - containerPort: 9093
          hostPort: 9093
        env:
        - name: KA_PORT
          value: "9093"
        - name: HOST_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: ZK_DIR
          value: "zk-0.zk-hs.default.svc.cluster.local:2182/kafka-test"
        volumeMounts:
        - name: datadir
          mountPath: /opt/kafka-logs
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: nfs
      resources:
        requests:
          storage: 5Gi

13
zk-kafka/kafka-svc.yaml Normal file

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  labels:
    app: kafka
spec:
  ports:
  - port: 9093
    name: server
  clusterIP: None
  selector:
    app: kafka
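
Since clusterIP is None, this is a headless Service; paired with the StatefulSet's serviceName it gives each broker a stable per-pod DNS name (kafka-0.kafka-svc, kafka-1.kafka-svc, kafka-2.kafka-svc in the default namespace). A quick way to confirm resolution from inside the cluster, assuming the broker image ships nslookup:

# Hypothetical check: resolve broker 0's headless-service DNS name from within its own pod.
kubectl exec kafka-0 -- nslookup kafka-0.kafka-svc.default.svc.cluster.local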

85
zk-kafka/kafka.yaml Normal file

@@ -0,0 +1,85 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  serviceName: kafka-svc
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      hostAliases:
      - ip: "192.168.40.127"
        hostnames:
        - "bigdata-127"
      - ip: "192.168.40.151"
        hostnames:
        - "bigdata-151"
      - ip: "192.168.40.152"
        hostnames:
        - "bigdata-152"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - kafka
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: "app"
                  operator: In
                  values:
                  - zk
              topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
      - name: k8skafka
        securityContext:
          runAsUser: 0
        imagePullPolicy: Always
        image: 192.168.40.153:9080/k8s/kafka:test3
        resources:
          requests:
            memory: "10Gi"
            cpu: 500m
        ports:
        - containerPort: 9093
          hostPort: 9093
        env:
        - name: KA_PORT
          value: "9093"
        - name: HOST_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: ZK_DIR
          value: "zk-0.zk-hs.default.svc.cluster.local:2182/kafka-test"
        volumeMounts:
        - name: datadir
          mountPath: /opt/kafka-logs
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: nfs
      resources:
        requests:
          storage: 5Gi

89
zk-kafka/pv.yaml Normal file

@@ -0,0 +1,89 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv1
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage1
    server: 192.168.40.127
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage2
    server: 192.168.40.127
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage3
    server: 192.168.40.127
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage4
    server: 192.168.40.127
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv5
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage5
    server: 192.168.40.127
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv6
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfs/storage6
    server: 192.168.40.127
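
A rough provisioning sketch to go with these volumes; it assumes 192.168.40.127 already exports /nfs to the cluster nodes and that the per-volume directories still need to be created, so adjust to the real NFS setup:

# On the NFS server (assumed layout):
mkdir -p /nfs/storage{1..6}

# From a workstation with kubectl access:
kubectl apply -f zk-kafka/pv.yaml
kubectl get pv   # the six PVs should report STATUS=Available until a claim binds them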

157
zk-kafka/zk.yaml Normal file

@@ -0,0 +1,157 @@
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
  - port: 2182
    name: client
    targetPort: 2182
    nodePort: 2182  # outside the default 30000-32767 range; the apiserver's --service-node-port-range must include it
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3 # run three ZooKeeper pods
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      nodeSelector: # schedule pods onto target nodes by label match
        travis.io/schedule-only: "kafka"
      tolerations:
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoSchedule"
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoExecute"
        tolerationSeconds: 3600
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "PreferNoSchedule"
      affinity: # run at most one zk pod per node
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      imagePullSecrets: # pull secret for the private image registry
      - name: registry-key
      containers:
      - name: kubernetes-zookeeper
        securityContext:
          runAsUser: 0
        imagePullPolicy: Always
        image: 192.168.40.153:9080/k8s/k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
        resources:
          requests:
            memory: "20Mi"
            cpu: "0.1"
        ports:
        - containerPort: 2182
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2182 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe: # pod health checks
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2182"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2182"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates: # NFS-backed PVC template
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: nfs
      resources:
        requests:
          storage: 10Gi
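
For reference, one plausible bring-up order for the manifests in this commit (a sketch, not something prescribed by the repository; the NodePort variant of the Kafka manifest is skipped because its file name is not visible above):

kubectl apply -f zk-kafka/pv.yaml          # NFS-backed PersistentVolumes
kubectl apply -f zk-kafka/zk.yaml          # ZooKeeper services, PodDisruptionBudget and StatefulSet
./zk-kafka/check.sh                        # expect one leader and two followers
kubectl apply -f zk-kafka/kafka-svc.yaml   # headless service for the brokers
kubectl apply -f zk-kafka/kafka.yaml       # Kafka StatefulSet
kubectl get pods -l app=kafka -w           # watch the broker pods come up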