Kubernetes cluster: single master node, two worker nodes

Date: 2023-12-13 08:52:32
# Deploying the node components
Bind the kubelet-bootstrap user to the system:node-bootstrapper cluster role (the minimum permission needed for certificate signing). Run this on the master node:
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
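To confirm the binding was created before moving on, a quick check with standard kubectl commands (run on the master):
kubectl get clusterrolebinding kubelet-bootstrap -o wide
kubectl describe clusterrolebinding kubelet-bootstrap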
root@k8s-master: ~/k8s ::
$ cat kubeconfig.sh
# Create the TLS Bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

#cat > token.csv <<EOF
#${BOOTSTRAP_TOKEN},kubelet-bootstrap,,"system:kubelet-bootstrap"
#EOF

#----------------------

# apiserver node IP
APISERVER=$1
# directory holding the certificates
SSL_DIR=$2

#### The kubeconfig files hold the credentials used to talk to the apiserver.
# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"

# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# set client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
root@k8s-master: ~/k8s ::
$ ### Running the script produces two files: bootstrap.kubeconfig and kube-proxy.kubeconfig
root@k8s-master: ~/k8s ::
$ bash kubeconfig.sh 192.168.1.63 /root/k8s/k8s-cert/
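The BOOTSTRAP_TOKEN hard-coded in the script must match the token the kube-apiserver was started with (the file passed to --token-auth-file). A quick sanity check, assuming that token file lives at /opt/kubernetes/cfg/token.csv on the master:
grep 0fb61c46f8991b718eb38d27b605b008 /opt/kubernetes/cfg/token.csv
grep 'token:' bootstrap.kubeconfig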
root@k8s-master: ~/k8s/k8s-cert ::
$ ls
admin.csr admin.pem ca.csr ca.pem kube-proxy-csr.json kube-proxy.pem server-key.pem
admin-csr.json bootstrap.kubeconfig ca-csr.json k8s-cert.sh kube-proxy-key.pem server.csr server.pem
admin-key.pem ca-config.json ca-key.pem kube-proxy.csr kube-proxy.kubeconfig server-csr.json
# Do not just copy and paste this file...
root@k8s-master: ~/k8s/k8s-cert ::
$ cat bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR2akNDQXFhZ0F3SUJBZ0lVWDJManhCcWlvUzlQczBGc0U4SlJGSnBQcHZBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEU1TURNeE9ERTFNekl3TUZvWERUSTBNRE14TmpFMU16SXdNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RERBSwpCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBc3NnUTZZNTFnaDYwRFFtQXBhY2QKbllRU2NLZFRHMWRaNU0zRGxPMVNVamk2aUExVHpSWlEwZ0VBWE0yWDRLSHgvcGNEN3hJZTJrQXB0Vi92aWo4WAo4Q1RCNEkzeGVQVFJHQVljWmFGNnBWaHNETExqZjBPd1ZYTUtvZi9uNi9tV1JVQXhsd3AwM2d4aExtMGp0L0hlCjB1UDlYYXhnV3EyNFI2emF6aFloM3VNaUZTdWYzWHdCV3VrTXV5YlAwSzJHczJrOU5ERFN6dUJMVU1ZUUVLWUsKcmFDVjhUM1psdk5DbzJWbDQxcFF4Wm80RDBLRUZWQWUwOXNuZkRwaUFCeUpGZlArQWs5M2xVOHdBcUVUeEpXNApYWksxQUNMeTN4WmlDYldCc0dBYWhlK1AxUmNJNjZJTldwZHV2TGFWOWVhcTRxOCtvbHBJbkQ0Z3gyQ1BVWjNKCmd3SURBUUFCbzJZd1pEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWQKQmdOVkhRNEVGZ1FVaEpIL1NmdTBzMk0xdjNhalBlVlZkSjFGVXA4d0h3WURWUjBqQkJnd0ZvQVVoSkgvU2Z1MApzMk0xdjNhalBlVlZkSjFGVXA4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCUTh1b05naC9IWnMyOXJqbTlxCjRtSER0M0puVnpNbWwzUEo0ZUphTnhlKytxUWRxQy9mVjI2Sk05NC9tbjQ3RDdNSWdFMTFlbFI1WjlCc2lWQlYKeTZha0Vnck9FKzduemtQUHhraThjQ0hSOFlrSzFLME52T3A3K3ZoYmYvWnlZcDRNUFhkZ2VEanY5VUIyQ3BTYwpEMFJvVkNsYUtEZjE0bjQ5eXJHU3NiVWVxdjBUYWJQS0VHUlZvN0t2dDMydTYvVWNOZGxhcHUvSW1USEs5clJpClFNaHRHbkpmMDZiaXlkVjkxYzJvTEdzRXNsbG9PVUhpUklYNUJySDlTS2xhd0JkZXg3RTJrUHdlRUxXTGZKbjQKTi9HN0VUc0xZalZLcXdxYWhNRHphcUdKdC9ML1VFakxCNk95aTRvd1ZldEhqcFIwQ0h3dDlhazNJMjg5QUpwZgpZNmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://192.168.1.63:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: 0fb61c46f8991b718eb38d27b605b008
### The token field must be present here ########
Copy the generated kubeconfig files and binaries to both worker nodes:
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.65:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.66:/opt/kubernetes/cfg/
scp kubelet kube-proxy root@192.168.1.65:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.1.66:/opt/kubernetes/bin/
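Before starting anything, it is worth confirming the files actually landed on each node (plain ssh/ls, same paths as above):
ssh root@192.168.1.65 'ls /opt/kubernetes/cfg/ /opt/kubernetes/bin/'
ssh root@192.168.1.66 'ls /opt/kubernetes/cfg/ /opt/kubernetes/bin/'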
On the node, run the deployment scripts.
root@k8s-node01: ~ ::
$ cat kubelet.sh
#!/bin/bash
# node IP address
NODE_ADDRESS=$1
# IP address used for cluster DNS
DNS_SERVER_IP=${2:-"10.0.0.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
Run the script:
root@k8s-node01: ~ ::
$ bash kubelet.sh 192.168.1.65
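If kubelet does not come up cleanly, the systemd status and journal are the first places to look; note that the node will not register until its CSR is approved on the master (next step):
systemctl status kubelet --no-pager
journalctl -u kubelet -n 20 --no-pager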
# kube-proxy script
root@k8s-node01: ~ ::
$ cat proxy.sh
#!/bin/bash
NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
Run the script:
root@k8s-node01: ~ ::
$ bash proxy.sh 192.168.1.65
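Because --proxy-mode=ipvs is set, the ip_vs kernel modules need to be available on the node, otherwise kube-proxy falls back to iptables mode. A quick check (ipvsadm is optional and may need to be installed separately):
lsmod | grep ip_vs
systemctl status kube-proxy --no-pager
ipvsadm -Ln        # only if the ipvsadm package is installed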
On the master node:
(view the CSRs the nodes have sent to the master)
$ kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-M6k2DlnOW4FIWGF7v4V97AyrmPBKSsIpzNj_BtKHZGE 3h53m kubelet-bootstrap Pending
node-csr-RyWUEYiuwDYFcu7fegbHl-XmUpc3diJtdHowU9LUJyU 3h39m kubelet-bootstrap Pending
root@k8s-master: ~ ::
$
(Approve a node's join request by appending the CSR name to the command)
kubectl certificate approve node-csr-CB7wV3ITot1QnhMPl2psUT-aAu2mEsXeW-8a9VelNfg
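Each pending CSR has to be approved by name; if there are several, they can be approved in one go (review the kubectl get csr output first so nothing unexpected gets signed):
kubectl get csr -o name | xargs -r kubectl certificate approve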
(On the master, view the nodes that have joined the cluster)
root@k8s-master: ~ ::
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h23m v1.13.4
192.168.1.66 Ready <none> 3h11m v1.13.4
root@k8s-master: ~ ::
$
=============
Node 2 operations
root@k8s-node01: ~ ::
$ scp -r /opt/kubernetes/ root@192.168.1.66:/opt/

root@k8s-node01: /opt/kubernetes/cfg ::
$ cat kubelet
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.66 \        ####### change to the current node's IP
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
root@k8s-node01: /opt/kubernetes/cfg ::
$ cat kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.66        ####### change to the current node's IP
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
root@k8s-node01: /opt/kubernetes/cfg ::
$ cat kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.65 \        ####### change to the current node's IP (192.168.1.66 here)
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
root@k8s-node01: /opt/kubernetes/cfg ::
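As the comments above note, every occurrence of node1's IP in the copied configs must be changed to node2's IP. One way to do that in a single step (a sketch; double-check the files afterwards):
sed -i 's/192.168.1.65/192.168.1.66/g' /opt/kubernetes/cfg/kubelet /opt/kubernetes/cfg/kubelet.config /opt/kubernetes/cfg/kube-proxy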
$
#####
Delete all of the SSL files that were issued for 192.168.1.65, because new certificates have to be generated for the .66 node, then start the services:
rm /opt/kubernetes/ssl/*
systemctl start kubelet
systemctl start kube-proxy

Just as with node1, run the following on the master to approve node2's certificate request and join it to the cluster.
3. On the master node:
(view the CSRs the nodes have sent to the master)
$ kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-M6k2DlnOW4FIWGF7v4V97AyrmPBKSsIpzNj_BtKHZGE 3h53m kubelet-bootstrap Pending
node-csr-RyWUEYiuwDYFcu7fegbHl-XmUpc3diJtdHowU9LUJyU 3h39m kubelet-bootstrap Pending
root@k8s-master: ~ 20:57:28
$
(Approve the node's join request by appending the CSR name to the command)
kubectl certificate approve node-csr-CB7wV3ITot1QnhMPl2psUT-aAu2mEsXeW-8a9VelNfg
(On the master, view the nodes that have joined the cluster)
root@k8s-master: ~ 20:59:30
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h23m v1.13.4
192.168.1.66 Ready <none> 3h11m v1.13.4
root@k8s-master: ~ 20:59:37
$
###########
View the created pods
root@k8s-master: ~ 21:14:53
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
java-84767655bc-24mr6 0/1 Completed 3 3m21s
nginx-7cdbd8cdc9-56xwp 1/1 Running 0 3h21m
nginx-7cdbd8cdc9-m94rk 1/1 Running 0 3h21m
nginx-7cdbd8cdc9-qd72h 1/1 Running 0 3h22m
root@k8s-master: ~ 21:14:55
$
View the cluster nodes
root@k8s-master: ~ 21:15:44
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h40m v1.13.4
192.168.1.66 Ready <none> 3h28m v1.13.4
root@k8s-master: ~ 21:15:50
$
Check which node each pod is running on
root@k8s-master: ~ 21:16:22
$ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
java-84767655bc-24mr6 0/1 CrashLoopBackOff 4 4m55s 172.17.88.3 192.168.1.66 <none> <none>
nginx-7cdbd8cdc9-56xwp 1/1 Running 0 3h22m 172.17.88.2 192.168.1.66 <none> <none>
nginx-7cdbd8cdc9-m94rk 1/1 Running 0 3h22m 172.17.75.3 192.168.1.65 <none> <none>
nginx-7cdbd8cdc9-qd72h 1/1 Running 0 3h23m 172.17.75.2 192.168.1.65 <none> <none>
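The java pod above is crash-looping; the usual first step is to look at its events and logs (pod name taken from the listing above). If the logs call is rejected with a Forbidden error for system:anonymous, the clusterrolebinding created at the end of this section addresses that:
kubectl describe pod java-84767655bc-24mr6
kubectl logs java-84767655bc-24mr6 --previous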
root@k8s-master: ~ 21:16:29
$
############# Run a test instance
Create a test deployment:
kubectl create deployment nginx --image=nginx

Scale it to three replicas:
kubectl scale deployment nginx --replicas=3

Expose the deployment; the external access port (NodePort) is assigned randomly:
kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort

Authorize anonymous access so pod logs can be viewed:
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
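To confirm the service is reachable, look up the NodePort that was assigned and curl it against any node IP (node IPs as used throughout this setup):
kubectl get svc nginx -o wide
NODEPORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://192.168.1.65:${NODEPORT}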