Kubernetes部署流程(学习版)

Kubernetes部署流程(学习版):第一步准备环境,配置网卡(ONBOOT=yes、BOOTPROTO=static、IPADDR=192.168.1.251、NETMASK=255.255.252.0)。

大家好,我是讯享网,很高兴认识大家。

第一步 :准备环境

#配置网卡

vi /etc/sysconfig/network-scripts/ifcfg-ens33   # 文件名为 ifcfg-<网卡名>,请按实际网卡名称调整
# 设置如下内容:
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.1.251
NETMASK=255.255.252.0
GATEWAY=192.168.1.1
DNS1=8.8.8.8

讯享网

#重启网卡

service network restart      # 重启网卡
vi /etc/selinux/config       # 设置 SELINUX=disabled
setenforce 0                 # 立即生效,关闭 SELinux
systemctl stop firewalld     # 关闭防火墙
iptables -vnL                # 查看状态
# 其他虚拟机重复以上步骤

#修改主机名

vi /etc/hostname //修改主机name hostname k8s-master // 修改生效 hostname k8s-node01 // 修改生效 hostname k8s-node02 // 修改生效 exit //退出 

第二步 :自签Etcd SSL证书 及部署集群

mkdir k8s
mkdir -p k8s/k8s-cert
mkdir -p k8s/etcd-cert
yum install -y wget    # 下载安装 wget

//CFSSL工具的安装
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
ls /usr/local/bin/cfssl*

ntpdate time.windows.com //更新系统时间

生成ca文件
# Write the CA signing policy for cfssl: one "www" profile valid for
# 87600h (10 years), usable for both server and client auth.
# Fixed: the original used curly "smart quotes", which are invalid JSON
# and make `cfssl gencert` fail to parse this file.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

# Write the CA certificate signing request (CSR) definition for the etcd CA.
# Fixed: replaced curly "smart quotes" with ASCII double quotes so the file
# is valid JSON for cfssl.
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# Write the etcd server CSR; "hosts" must list every IP the etcd cluster
# members are reachable on, otherwise TLS verification fails for that peer.
# Fixed: replaced curly "smart quotes" with ASCII double quotes (valid JSON).
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.1.251",
    "192.168.1.253",
    "192.168.1.254"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

mkdir soft

mkdir /opt/etcd/{cfg,bin,ssl} -p

下载、解压 etcd-v3.3.10-linux-amd64.tar.gz
tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
mv etcd etcdctl /opt/etcd/bin/
ls /opt/etcd/bin/

scp -r /opt/etcd/ root@192.168.1.253:/opt/
scp -r /usr/lib/systemd/system/etcd.service root@192.168.1.253:/usr/lib/systemd/system
// 修改对应node节点
vi /opt/etcd/cfg/etcd

systemctl daemon-reload
systemctl start etcd
tail /var/log/messages -f

// 访问地址 --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379"

第三步 Node安装Docker
// 安装系统工具
yum install -y yum-utils device-mapper-persistent-data lvm2
// 配置官方源
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
// 更新并安装Docker-CE
yum makecache fast
yum -y install docker-ce
// 引用docker 加速
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io

// 其他节点安装docker复制第三步

第四步 部署kubernetes网络 -Flannel(没有其他需要只安装在Node节点)

//导入flannel.sh文件
chmod +x flannel.sh
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
./flannel.sh https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379

//导入flannel安装包并解压
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
systemctl start flanneld
ps -ef |grep flanneld // 查看flanneld是否启动
ps -ef |grep docker // 查看docker是否引用flannel分配的IP

//将当前节点flannel 的配置文件copy到其他节点上
scp -r /opt/kubernetes/ root@192.168.1.254:/opt/
scp -r /usr/lib/systemd/system/{flanneld,docker}.service root@192.168.1.254:/usr/lib/systemd/system/

/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379" get /coreos.com/network/subnets/172.17.60.0-24
# 预期输出:{"PublicIP":"192.168.1.248","BackendType":"vxlan","BackendData":{"VtepMAC":"16:d9:18:a2:92:49"}}

第五步 部署master组件(部署到master节点)
//上传master.zip文件
yum install unzip -y
unzip master.zip

mkdir -p /opt/kubernetes/{bin,cfg,ssl}

cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/

chmod +x apiserver.sh
./apiserver.sh 192.168.1.251 https://192.168.1.251:2379,https://192.168.1.253:2379,https://192.168.1.254:2379
vi /opt/kubernetes/cfg/kube-apiserver

# 修改日志存储方式并添加日志存储目录(注意参数是两个半角短横线 --,原文被排版成了全角破折号):
KUBE_APISERVER_OPTS="--logtostderr=false \
--log-dir=/opt/kubernetes/logs \
mkdir /opt/kubernetes/logs
/opt/kubernetes/bin/kube-apiserver --help | grep logs

//上传k8s-cert.sh文件
vi k8s-cert.sh // 添加k8s所有的ip地址
bash k8s-cert.sh //生成相关配置文件
cp ca.pem ca-key.pem server.pem server-key.pem /opt/kubernetes/ssl/

//执行下面命令
# Bootstrap token used by kubelet TLS bootstrapping; the same value must be
# referenced later in kubeconfig.sh.
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
# Fixed: the original wrapped the group name in curly "smart quotes", which
# would be stored literally in token.csv and break API-server token auth.
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

cat token.csv //查看文件是否生成
mv token.csv /opt/kubernetes/cfg/
systemctl restart kube-apiserver
ps -ef |grep kube-apiserver

netstat -antp |grep 8080 // 查看监听状态

#---------------------- //日志排错
vi /opt/kubernetes/logs/kube-apiserver.INFO
source /opt/kubernetes/cfg/kube-apiserver
/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
#----------------------


讯享网

chmod +x controller-manager.sh
cat controller-manager.sh
./controller-manager.sh 127.0.0.1
./scheduler.sh 127.0.0.1

cp /root/soft/kubernetes/server/bin/kubectl /usr/bin/

kubectl get cs //查看当前etcd节点的状态 和(controller-manager, scheduler)

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

// 上传kubeconfig.sh文件

创建 TLS Bootstrapping Token

# Optionally generate a random token instead of reusing the fixed one.
# Fixed: the original had a curly apostrophe inside tr's argument; it must be
# a plain single-quoted space: tr -d ' '
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
# Fixed: ASCII double quotes (the curly quotes would be written into the file).
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
#----------------------

vi kubeconfig.sh //添加以下
#----------------------
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
#----------------------

bash kubeconfig.sh 192.168.1.251 /root/k8s/k8s-cert

scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.254:/opt/kubernetes/cfg/

cd /root/soft/kubernetes/server/bin/
scp kubelet kube-proxy root@192.168.1.253:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.1.254:/opt/kubernetes/bin/

第六步 部署node组件 kubelet
上传node.zip文件
unzip node.zip

bash kubelet.sh 192.168.1.253

vi /opt/kubernetes/cfg/kubelet // 修改文件日志路径

mkdir /opt/kubernetes/logs

systemctl restart kubelet

journalctl -u kubelet // 查看日志

第七步 部署node组件 kube-proxy

// copy到其他node节点上
scp -r /opt/kubernetes/ root@192.168.1.254:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.1.254:/usr/lib/systemd/system/

// cd /opt/kubernetes/cfg 修改对应的ip节点
vi kubelet
vi kubelet.config
vi kube-proxy

kubectl get csr //获取请求签名(master节点执行命令)
kubectl certificate approve +请求签名 允许加入集群
// 如:kubectl certificate approve node-csr-WxFdb6gcM_uA7ySEtQ8bn16X04XBuKVoLz8L7Tlu4CM

kubectl get node //查看节点

第八步 部署管理页面

下载并修改Dashboard安装脚本
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
修改recommended.yaml文件内容:

#增加直接访问端口
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort    #增加
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30008    #增加
  selector:
    k8s-app: kubernetes-dashboard

因为自动生成的证书很多浏览器无法使用,
所以我们自己创建,注释掉kubernetes-dashboard-certs对象声明
#---
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque



创建证书
mkdir dashboard-certs

cd dashboard-certs/

创建key文件

openssl genrsa -out dashboard.key 2048

# 注意:原文缺少后续步骤——仅有私钥无法创建证书 Secret,需自签证书并导入(请按实际环境确认):
openssl req -new -key dashboard.key -out dashboard.csr -subj "/CN=kubernetes-dashboard"
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt -days 3650
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard

安装Dashboard
kubectl apply -f recommended.yaml

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
dashboard-metrics-scraper ClusterIP 10.96.113.127 8000/TCP 16s k8s-app=dashboard-metrics-scraper
kubernetes-dashboard NodePort 10.96.203.158 443:30008/TCP 16s k8s-app=kubernetes-dashboard

新建一个yaml文件:
#创建账号:
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard

---
#为用户分配权限:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard

登录Dashboard
访问:https://192.168.1.253:30008(原文示例 192.168.174.137 与本集群 192.168.1.x 网段不符),选择Token登录,复制刚才生成的密钥。
注意,IP为任意node节点的对外的IP.

第九步 Kubernetes Dashboard 设置用户密码登陆

小讯
上一篇 2025-03-16 07:08
下一篇 2025-04-10 21:31

相关推荐

版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容,请联系我们,一经查实,本站将立刻删除。
如需转载请保留出处:https://51itzy.com/kjqy/60839.html