2. Basic environment preparation
2.1 Base system settings (run on every machine)
2.1.1 Set the hostname
hostnamectl set-hostname hdss7-xx.host.com
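Here xx stands for each host's own number (11, 12, 21, 22, 200). For example, on the 10.4.7.11 machine:
hostnamectl set-hostname hdss7-11.host.com
hostnamectl status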
2.1.2 Configure the network interface
~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO=none
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="aa2c9856-6a9c-4e96-a9f0-1de9a6581d78"
DEVICE="ens33"
ONBOOT="yes"
IPADDR=10.4.7.xx
NETMASK=255.255.255.0
GATEWAY=10.4.7.254
DNS1=10.4.7.254
2.1.3 Disable the firewall and SELinux
~]# systemctl stop firewalld
~]# systemctl disable firewalld
~]# setenforce 0
setenforce: SELinux is disabled
~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
2.1.4 Configure the yum repositories
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all
yum makecache
2.1.5 Install common tools
yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y
2.2 Install the bind service (DNS) on hdss7-11.host.com
2.2.1 Install bind 9
yum install bind -y
2.2.2 Configure bind 9
vi /etc/named.conf
listen-on port 53 { 10.4.7.11; };
allow-query { any; };
forwarders { 10.4.7.254; };
recursion yes;
dnssec-enable no;
dnssec-validation no;
vi /etc/named.rfc1912.zones
zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 10.4.7.11; };
};
zone "od.com" IN {
        type master;
        file "od.com.zone";
        allow-update { 10.4.7.11; };
};
vi /var/named/host.com.zone
$ORIGIN host.com.
$TTL 600        ; 10 minutes
@       IN SOA  dns.host.com. dnsadmin.host.com. (
                        ; serial
                10800   ; refresh (3 hours)
                900     ; retry (15 minutes)
                        ; expire (1 week)
                86400   ; minimum (1 day)
                )
                NS      dns.host.com.
$TTL 60 ; 1 minute
dns             A       10.4.7.11
HDSS7-11        A       10.4.7.11
HDSS7-12        A       10.4.7.12
HDSS7-21        A       10.4.7.21
HDSS7-22        A       10.4.7.22
HDSS7-200       A       10.4.7.200
vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
                        ; serial
                10800   ; refresh (3 hours)
                900     ; retry (15 minutes)
                        ; expire (1 week)
                86400   ; minimum (1 day)
                )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.4.7.11
2.2.3 Check the configuration and start bind 9
named-checkconf
systemctl start named ; systemctl enable named
netstat -lntup|grep 53
[root@hdss7-11 ~]# dig -t A hdss7-11.host.com @10.4.7.11 +short
10.4.7.11
[root@hdss7-11 ~]# dig -t A hdss7-12.host.com @10.4.7.11 +short
10.4.7.12
[root@hdss7-11 ~]# dig -t A hdss7-21.host.com @10.4.7.11 +short
10.4.7.21
[root@hdss7-11 ~]# dig -t A hdss7-22.host.com @10.4.7.11 +short
10.4.7.22
[root@hdss7-11 ~]# dig -t A hdss7-200.host.com @10.4.7.11 +short
10.4.7.200
2.2.4 Configure the DNS clients
On all Linux hosts (adjust the ifcfg file name to your actual interface, e.g. ifcfg-ens33):
vi /etc/sysconfig/network-scripts/ifcfg-eth0
DNS1=10.4.7.11
vi /etc/resolv.conf
search host.com
nameserver 10.4.7.11
systemctl restart network
On Windows hosts:
Set the DNS of the VMnet8 adapter to 10.4.7.11
2.2.5 Check
Linux:
ping www.baidu.com
ping hdss7-200
Windows:
ping hdss7-200.host.com
2.3 Prepare the certificate-signing environment (on hdss7-200.host.com)
2.3.1 Install cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
chmod +x /usr/bin/cfssl*
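A quick sanity check: if the binaries downloaded correctly, cfssl will print its version information:
cfssl version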
2.3.2 Create the JSON config used to generate the CA certificate CSR
mkdir /opt/certs
vi /opt/certs/ca-csr.json
{
    "CN": "OldboyEdu",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "h"
    }
}
2.3.3 Generate the CA certificate files
[root@hdss7-200 ~]# cd /opt/certs
[root@hdss7-200 certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
2022/01/01 00:23:57 [INFO] generating a new CA key and certificate from CSR
2022/01/01 00:23:57 [INFO] generate received request
2022/01/01 00:23:57 [INFO] received CSR
2022/01/01 00:23:57 [INFO] generating key: rsa-2048
2022/01/01 00:23:57 [INFO] encoded CSR
2022/01/01 00:23:57 [INFO] signed certificate with serial number 8341
[root@hdss7-200 certs]# ll
total 16
-rw-r--r-- 1 root root  993 Jan  1 00:23 ca.csr
-rw-r--r-- 1 root root  328 Jan  1 00:23 ca-csr.json
-rw------- 1 root root 1679 Jan  1 00:23 ca-key.pem
-rw-r--r-- 1 root root 1346 Jan  1 00:23 ca.pem
[root@hdss7-200 certs]#
2.4 Deploy docker (on hdss7-21.host.com, hdss7-22.host.com, and hdss7-200.host.com)
2.4.1 Install
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
2.4.2 Configure docker
mkdir /etc/docker
vi /etc/docker/daemon.json
{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
    "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
    "bip": "172.7.21.1/24",
    "exec-opts": ["native.cgroupdriver=systemd"],
    "live-restore": true
}
The bip must be changed to match each host's IP, as shown in the example below.
Note: hdss7-21.host.com  uses bip 172.7.21.1/24
      hdss7-22.host.com  uses bip 172.7.22.1/24
      hdss7-200.host.com uses bip 172.7.200.1/24
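For instance, after copying the same daemon.json to hdss7-22, the only line that needs to change is:
    "bip": "172.7.22.1/24",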
2.4.3 Start
mkdir -p /data/docker
systemctl start docker
systemctl enable docker
docker --version
2.5 Deploy the private docker image registry harbor (on hdss7-200.host.com)
2.5.1 Download and unpack the software
~]# cd /opt/src
src]# wget https://github.com/goharbor/harbor/releases/download/v1.9.4/harbor-offline-installer-v1.9.4.tgz
src]# tar xf harbor-offline-installer-v1.9.4.tgz
src]# mv harbor /opt/harbor-v1.9.4
src]# ln -s /opt/harbor-v1.9.4 /opt/harbor
2.5.2 Configure
[root@hdss7-200 opt]# vi /opt/harbor/harbor.yml
hostname: harbor.od.com
http:
  port: 180
harbor_admin_password: Harbor12345
data_volume: /data/harbor
log:
  level: info
  rotate_count: 50
  rotate_size: 200M
  location: /data/harbor/logs
[root@hdss7-200 opt]# mkdir -p /data/harbor/logs
2.5.3 Install docker-compose
[root@hdss7-200 opt]# yum install docker-compose -y
2.5.4 Install harbor
[root@hdss7-200 opt]# cd harbor
[root@hdss7-200 harbor]# ./install.sh
✔ ----Harbor has been installed and started successfully.----
Now you should be able to visit the admin portal at http://harbor.od.com.
For more details, please visit https://github.com/goharbor/harbor .
2.5.5 Check
[root@hdss7-200 harbor]# docker-compose ps
      Name                     Command                State                    Ports
---------------------------------------------------------------------------------------------------
harbor-core         /harbor/harbor_core              Up
harbor-db           /docker-entrypoint.sh            Up      5432/tcp
harbor-jobservice   /harbor/harbor_jobservice ...    Up
harbor-log          /bin/sh -c /usr/local/bin/ ...   Up      127.0.0.1:1514->10514/tcp
harbor-portal       nginx -g daemon off;             Up      8080/tcp
nginx               nginx -g daemon off;             Up      0.0.0.0:180->8080/tcp,:::180->8080/tcp
redis               redis-server /etc/redis.conf     Up      6379/tcp
registry            /entrypoint.sh /etc/regist ...   Up      5000/tcp
registryctl         /harbor/start.sh                 Up
2.5.6 Start harbor on boot
[root@hdss7-200 harbor]# vim /etc/rc.d/rc.local
# append the following lines
# start harbor
cd /opt/harbor
/usr/bin/docker-compose stop
/usr/bin/docker-compose start
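On CentOS 7, /etc/rc.d/rc.local is only executed at boot if it is executable, so this approach also needs:
[root@hdss7-200 harbor]# chmod +x /etc/rc.d/rc.local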
2.5.7 Add an internal DNS record for harbor (on hdss7-11)
[root@hdss7-11 ~]# vi /var/named/od.com.zone
; bump the serial in the SOA record, then append:
harbor             A    10.4.7.200
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A harbor.od.com +short
10.4.7.200
2.5.8 Install and configure NGINX
[root@hdss7-200 harbor]# yum install nginx -y
[root@hdss7-200 harbor]# vi /etc/nginx/conf.d/harbor.od.com.conf
server {
    listen       80;
    server_name  harbor.od.com;
    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}
[root@hdss7-200 harbor]# nginx -t
[root@hdss7-200 harbor]# systemctl start nginx
[root@hdss7-200 harbor]# systemctl enable nginx
2.5.9 Open harbor.od.com in a browser and test
1. In the browser go to harbor.od.com and log in with username admin, password Harbor12345.
2. Create a new project named public with access level Public.
3. Pull an image and tag it:
[root@hdss7-200 harbor]# docker pull nginx:1.7.9
[root@hdss7-200 harbor]# docker images |grep 1.7.9
[root@hdss7-200 harbor]# docker tag 84581e99d807 harbor.od.com/public/nginx:v1.7.9
4. Log in to harbor and push the image to the registry:
[root@hdss7-200 harbor]# docker login harbor.od.com
[root@hdss7-200 harbor]# docker push harbor.od.com/public/nginx:v1.7.9
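As an optional check, any other docker node that lists harbor.od.com under insecure-registries should now be able to pull the image back without logging in, since the public project is public:
[root@hdss7-21 ~]# docker pull harbor.od.com/public/nginx:v1.7.9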
3 Deploy the master nodes
3.1 Deploy the etcd cluster (hdss7-12, hdss7-21, hdss7-22)
etcd's leader election requires at least 3 members and an odd member count: with 3 members the quorum is 2, so the cluster stays available if one member fails (see the table below).
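The quorum math behind that rule, for reference (quorum = floor(n/2) + 1):
members   quorum   failures tolerated
   1         1            0
   3         2            1
   5         3            2
Adding one more member to make an even count does not raise the number of tolerated failures, which is why odd cluster sizes are used.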
3.1.1 Sign the etcd certificates (on hdss7-200)
● Create the CA signing config: /opt/certs/ca-config.json
○ server: the certificate presented by the server side when clients connect; clients use it to verify the server's identity
○ client: the certificate presented by the client side when connecting to a server; the server uses it to verify the client's identity
○ peer: the certificate used for mutual authentication between peers, e.g. between etcd members
[root@hdss7-200 ~]# vi /opt/certs/ca-config.json
{
    "signing": {
        "default": {
            "expiry": "h"
        },
        "profiles": {
            "server": {
                "expiry": "h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
[root@hdss7-200 ~]# vi /opt/certs/etcd-peer-csr.json
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.4.7.11",
        "10.4.7.12",
        "10.4.7.21",
        "10.4.7.22"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
● Sign the certificate and check the result
[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json |cfssl-json -bare etcd-peer
[root@hdss7-200 certs]# ll
etcd-peer.csr  etcd-peer-csr.json  etcd-peer-key.pem  etcd-peer.pem
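If you want to confirm what the peer profile actually put into the certificate (key usages, SANs, expiry), the cfssl-certinfo binary installed earlier can decode it:
[root@hdss7-200 certs]# cfssl-certinfo -cert etcd-peer.pem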
3.1.2 Install etcd (hdss7-12, hdss7-21, hdss7-22)
Create the etcd user:
[root@hdss7-12 ~]# useradd -s /sbin/nologin -M etcd
Download the software, unpack it, and create a symlink:
[root@hdss7-12 src]# wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@hdss7-12 src]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
[root@hdss7-12 src]# cd ..
[root@hdss7-12 opt]# mv etcd-v3.1.20-linux-amd64/ etcd-v3.1.20
[root@hdss7-12 opt]# ln -s /opt/etcd-v3.1.20/ /opt/etcd
Create the certificate, data, and log directories:
[root@hdss7-12 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
Copy the generated certificate files over:
[root@hdss7-12 certs]# cd /opt/etcd/certs
[root@hdss7-12 certs]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-12 certs]# scp hdss7-200:/opt/certs/etcd-peer.pem .
[root@hdss7-12 certs]# scp hdss7-200:/opt/certs/etcd-peer-key.pem .
Create the etcd startup script:
[root@hdss7-12 ~]# vi /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-7-12 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.4.7.12:2380 \
       --listen-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes \
       --initial-advertise-peer-urls https://10.4.7.12:2380 \
       --advertise-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
       --initial-cluster etcd-server-7-12=https://10.4.7.12:2380,etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
[root@hdss7-12 ~]# chmod +x /opt/etcd/etcd-server-startup.sh
Set ownership on the directories:
[root@hdss7-12 ~]# chown -R etcd.etcd /opt/etcd-v3.1.20/ /data/etcd/ /data/logs/etcd-server/
Install supervisor:
[root@hdss7-12 ~]# yum install supervisor -y
[root@hdss7-12 ~]# systemctl start supervisord
[root@hdss7-12 ~]# systemctl enable supervisord
[root@hdss7-12 ~]# vi /etc/supervisord.d/etcd-server.ini
[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh               ; the program (relative uses PATH, can take args)
numprocs=1                                             ; number of processes copies to start (def 1)
directory=/opt/etcd                                    ; directory to cwd to before exec (def no cwd)
autostart=true                                         ; start at supervisord start (default: true)
autorestart=true                                       ; restart at unexpected quit (default: true)
startsecs=30                                           ; number of secs prog must stay running (def. 1)
startretries=3                                         ; max # of serial start failures (default 3)
exitcodes=0,2                                          ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                        ; signal used to kill process (default TERM)
stopwaitsecs=10                                        ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                              ; setuid to this UNIX account to run the program
redirect_stderr=true                                   ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                           ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                               ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                            ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                            ; emit events on stdout writes (default false)
Start the etcd service and check:
[root@hdss7-12 ~]# supervisorctl update
[root@hdss7-12 ~]# supervisorctl status
[root@hdss7-12 ~]# netstat -lntup|grep etcd
Check the cluster health:
[root@hdss7-22 etcd]# ./etcdctl cluster-health
[root@hdss7-22 etcd]# ./etcdctl member list
What differs between the three nodes (see the sketch below):
- /opt/etcd/etcd-server-startup.sh: --name, --listen-peer-urls, --listen-client-urls, --initial-advertise-peer-urls, --advertise-client-urls
- /etc/supervisord.d/etcd-server.ini: the [program:...] name
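For example, on hdss7-21 these lines of the startup script would presumably read as follows (matching the --initial-cluster entry above; the remaining flags stay the same), and the ini header would be [program:etcd-server-7-21]:
./etcd --name etcd-server-7-21 \
       --listen-peer-urls https://10.4.7.21:2380 \
       --listen-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
       --initial-advertise-peer-urls https://10.4.7.21:2380 \
       --advertise-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379 \
       ...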
3.2 Deploy the kube-apiserver cluster (hdss7-21, hdss7-22)
3.2.1 Sign the client certificate on hdss7-200.host.com
Create the JSON config used to generate the certificate CSR:
[root@hdss7-200 certs]# vi /opt/certs/client-csr.json
{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
Generate the client certificate files:
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client
Check the generated certificate files:
[root@hdss7-200 certs]# ll
client.csr  client-csr.json  client-key.pem  client.pem
3.2.2 Sign the kube-apiserver certificate on hdss7-200.host.com
Create the JSON config used to generate the certificate CSR:
[root@hdss7-200 certs]# vi /opt/certs/apiserver-csr.json
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
Generate the kube-apiserver certificate files:
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver
Check the generated certificate files:
[root@hdss7-200 certs]# ll
apiserver.csr  apiserver-csr.json  apiserver-key.pem  apiserver.pem
3.2.3 Download the kubernetes server binaries (hdss7-21, hdss7-22)
Downloading the kubernetes binary release may require a proxy that can reach these sites.
● Open the kubernetes GitHub page: https://github.com/kubernetes/kubernetes
● Go to the tags page: https://github.com/kubernetes/kubernetes/tags
● Pick the version to download: https://github.com/kubernetes/kubernetes/releases/tag/v1.15.2
● Click CHANGELOG-${version}.md for the download links: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#downloads-for-v1152
● Download the Server Binaries: https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
[root@hdss7-21 ~]# cd /opt/src
[root@hdss7-21 src]# wget https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
[root@hdss7-21 src]# tar -xf kubernetes-server-linux-amd64.tar.gz
[root@hdss7-21 src]# mv kubernetes /opt/kubernetes-v1.15.2
[root@hdss7-21 opt]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes
[root@hdss7-21 opt]# cd kubernetes
[root@hdss7-21 kubernetes]# rm -rf kubernetes-src.tar.gz
[root@hdss7-21 kubernetes]# cd server/bin
[root@hdss7-21 bin]# rm -f *.tar
[root@hdss7-21 bin]# rm -f *_tag
[root@hdss7-21 bin]# ll
Copy the certificate files into /opt/kubernetes/server/bin/cert (create the directory first):
[root@hdss7-21 bin]# mkdir cert && cd cert
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca-key.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/apiserver.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/apiserver-key.pem .
Create the audit policy configuration:
[root@hdss7-21 bin]# mkdir conf && cd conf
[root@hdss7-21 conf]# vi audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]
  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]
  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]
  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"
  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]
  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]
  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.
  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
Create the apiserver startup script:
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --service-account-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
  --v 2
Make it executable and create the log directory:
[root@hdss7-21 bin]# chmod +x kube-apiserver.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver
Create the supervisor config:
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh  ; the program (relative uses PATH, can take args)
numprocs=1                            ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin  ; directory to cwd to before exec (def no cwd)
autostart=true                        ; start at supervisord start (default: true)
autorestart=true                      ; restart at unexpected quit (default: true)
startsecs=30                          ; number of secs prog must stay running (def. 1)
startretries=3                        ; max # of serial start failures (default 3)
exitcodes=0,2                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                       ; signal used to kill process (default TERM)
stopwaitsecs=10                       ; max num secs to wait b4 SIGKILL (default 10)
user=root                             ; setuid to this UNIX account to run the program
redirect_stderr=true                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false           ; emit events on stdout writes (default false)
Start the service and check:
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
[root@hdss7-21 bin]# netstat -nltup|grep kube-api
What differs between the two nodes:
- /etc/supervisord.d/kube-apiserver.ini: the program name, [program:kube-apiserver-7-21] on hdss7-21 and [program:kube-apiserver-7-22] on hdss7-22
3.3 Deploy the layer-4 reverse proxy (hdss7-11, hdss7-12)
3.3.1 Install NGINX and keepalived
[root@hdss7-12 ~]# yum install nginx keepalived -y
[root@hdss7-12 ~]# yum install -y nginx-all-modules
Configure NGINX on both hdss7-11.host.com and hdss7-12.host.com (the stream block sits at the top level of nginx.conf, outside the http block):
[root@hdss7-11 ~]# vi /etc/nginx/nginx.conf
stream {
    upstream kube-apiserver {
        server 10.4.7.21:6443     max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
[root@hdss7-11 ~]# nginx -t
Configure keepalived on hdss7-11.host.com and hdss7-12.host.com
check_port.sh must be created on both hdss7-11.host.com and hdss7-12.host.com.
Note: check your NIC name and adjust the interface setting in the config accordingly.
[root@hdss7-11 ~]# vi /etc/keepalived/check_port.sh
#!/bin/bash
# keepalived port-monitoring script
# Usage, in the keepalived configuration file:
# vrrp_script check_port {                        # define a vrrp_script to run the check
#     script "/etc/keepalived/check_port.sh 6379" # port to monitor
#     interval 2                                  # how often to run the check, in seconds
# }
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used,End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
fi
[root@hdss7-11 ~]# chmod +x /etc/keepalived/check_port.sh

keepalived master configuration:
[root@hdss7-11 conf.d]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 10.4.7.11
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
    nopreempt
    authentication {
        auth_type PASS
        auth_pass
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}

keepalived backup configuration:
[root@hdss7-12 conf.d]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 10.4.7.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    mcast_src_ip 10.4.7.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
Start the proxy and check (on both hosts):
systemctl start nginx keepalived
systemctl enable nginx keepalived
netstat -lntup|grep nginx
ip addr
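A simple optional failover check, assuming the VIP currently sits on hdss7-11: stopping NGINX there makes check_port.sh exit 1, keepalived lowers the priority by 20, and the VIP should move to the backup.
[root@hdss7-11 ~]# systemctl stop nginx
[root@hdss7-12 ~]# ip addr show eth0     # the VIP 10.4.7.10 should appear here shortly
[root@hdss7-11 ~]# systemctl start nginx # restore the service afterwards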
3.4 Deploy controller-manager (hdss7-21, hdss7-22)
Create the startup script:
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
  --cluster-cidr 172.7.0.0/16 \
  --leader-elect true \
  --log-dir /data/logs/kubernetes/kube-controller-manager \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --root-ca-file ./cert/ca.pem \
  --v 2
Make the script executable and create the log directory:
[root@hdss7-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-controller-manager
Create the supervisor config:
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh  ; the program (relative uses PATH, can take args)
numprocs=1                            ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin  ; directory to cwd to before exec (def no cwd)
autostart=true                        ; start at supervisord start (default: true)
autorestart=true                      ; restart at unexpected quit (default: true)
startsecs=30                          ; number of secs prog must stay running (def. 1)
startretries=3                        ; max # of serial start failures (default 3)
exitcodes=0,2                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                       ; signal used to kill process (default TERM)
stopwaitsecs=10                       ; max num secs to wait b4 SIGKILL (default 10)
user=root                             ; setuid to this UNIX account to run the program
redirect_stderr=true                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false           ; emit events on stdout writes (default false)
Start the service and check:
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
What differs between the two nodes:
- /etc/supervisord.d/kube-controller-manager.ini: the program name, [program:kube-controller-manager-7-21] on hdss7-21 and [program:kube-controller-manager-7-22] on hdss7-22
3.5 Deploy kube-scheduler (hdss7-21, hdss7-22)
Create the startup script:
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler \
  --leader-elect \
  --log-dir /data/logs/kubernetes/kube-scheduler \
  --master http://127.0.0.1:8080 \
  --v 2
Make the script executable and create the log directory:
[root@hdss7-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-scheduler
Create the supervisor config:
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh  ; the program (relative uses PATH, can take args)
numprocs=1                            ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin  ; directory to cwd to before exec (def no cwd)
autostart=true                        ; start at supervisord start (default: true)
autorestart=true                      ; restart at unexpected quit (default: true)
startsecs=30                          ; number of secs prog must stay running (def. 1)
startretries=3                        ; max # of serial start failures (default 3)
exitcodes=0,2                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                       ; signal used to kill process (default TERM)
stopwaitsecs=10                       ; max num secs to wait b4 SIGKILL (default 10)
user=root                             ; setuid to this UNIX account to run the program
redirect_stderr=true                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false           ; emit events on stdout writes (default false)
Start the service and check:
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
What differs between the two nodes: in /etc/supervisord.d/kube-scheduler.ini, the program name, [program:kube-scheduler-7-21] on hdss7-21 and [program:kube-scheduler-7-22] on hdss7-22.
Create a kubectl symlink:
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
Check the master components:
[root@hdss7-21 bin]# kubectl get cs
4 Deploy the node components (hdss7-21, hdss7-22)
4.1 Deploy kubelet
Sign the kubelet certificate on hdss7-200.host.com; create the JSON config used to generate the certificate CSR:
[root@hdss7-200 certs]# vi kubelet-csr.json
{
    "CN": "k8s-kubelet",
    "hosts": [
        "127.0.0.1",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23",
        "10.4.7.24",
        "10.4.7.25",
        "10.4.7.26",
        "10.4.7.27",
        "10.4.7.28"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
Generate the kubelet certificate files:
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
Check the generated certificate files:
[root@hdss7-200 certs]# ll
kubelet.csr  kubelet-csr.json  kubelet-key.pem  kubelet.pem
Copy the certificate files to each node (both 21 and 22), then create the configuration:
[root@hdss7-21 cert]# pwd
/opt/kubernetes/server/bin/cert
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet-key.pem .
Create the configuration (the following only needs to be run on one node):
[root@hdss7-21 conf]# pwd
/opt/kubernetes/server/bin/conf
- set-cluster
[root@hdss7-21 conf]# kubectl config set-cluster myk8s \
    --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
    --embed-certs=true \
    --server=https://10.4.7.10:7443 \
    --kubeconfig=kubelet.kubeconfig
- set-credentials
[root@hdss7-21 conf]# kubectl config set-credentials k8s-node \
    --client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
    --client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
    --embed-certs=true \
    --kubeconfig=kubelet.kubeconfig
- set-context
[root@hdss7-21 conf]# kubectl config set-context myk8s-context \
    --cluster=myk8s \
    --user=k8s-node \
    --kubeconfig=kubelet.kubeconfig
- use-context
[root@hdss7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
- Check the generated kubelet.kubeconfig
[root@hdss7-21 conf]# ll kubelet.kubeconfig
- k8s-node.yaml
[root@hdss7-21 conf]# vi k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
- Apply the resource configuration
[root@hdss7-21 conf]# kubectl create -f k8s-node.yaml
- Check the cluster role binding and its details
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node -o yaml
- Copy kubelet.kubeconfig to hdss7-22.host.com (run this on 22):
[root@hdss7-22 conf]# scp hdss7-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
Pull the pause image on hdss7-200.host.com:
[root@hdss7-200 ~]# docker pull kubernetes/pause
Push it to the private harbor registry:
[root@hdss7-200 ~]# docker images -a
[root@hdss7-200 ~]# docker tag f9d5de079539 harbor.od.com/public/pause:latest
[root@hdss7-200 ~]# docker images -a
[root@hdss7-200 ~]# docker push harbor.od.com/public/pause:latest
Create the kubelet startup script on hdss7-21.host.com and hdss7-22.host.com:
[root@hdss7-21 conf]# vi /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
  --hostname-override hdss7-21.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet
On hdss7-21.host.com and hdss7-22.host.com, make it executable and create the directories:
[root@hdss7-21 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
[root@hdss7-21 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
Create the supervisor config on hdss7-21.host.com and hdss7-22.host.com:
[root@hdss7-21 conf]# vi /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-7-21]
command=/opt/kubernetes/server/bin/kubelet.sh  ; the program (relative uses PATH, can take args)
numprocs=1                            ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin  ; directory to cwd to before exec (def no cwd)
autostart=true                        ; start at supervisord start (default: true)
autorestart=true                      ; restart at unexpected quit (default: true)
startsecs=30                          ; number of secs prog must stay running (def. 1)
startretries=3                        ; max # of serial start failures (default 3)
exitcodes=0,2                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                       ; signal used to kill process (default TERM)
stopwaitsecs=10                       ; max num secs to wait b4 SIGKILL (default 10)
user=root                             ; setuid to this UNIX account to run the program
redirect_stderr=true                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false           ; emit events on stdout writes (default false)
Start the service and check:
[root@hdss7-21 conf]# supervisorctl update
[root@hdss7-21 conf]# supervisorctl status
What differs between the two nodes (see the sketch below):
- /opt/kubernetes/server/bin/kubelet.sh: --hostname-override
- /etc/supervisord.d/kube-kubelet.ini: the [program:...] name
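For example, on hdss7-22 the two spots would presumably read:
--hostname-override hdss7-22.host.com
[program:kube-kubelet-7-22]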
Check all nodes and label them (repeat the label commands for hdss7-22.host.com as needed):
[root@hdss7-21 ~]# kubectl get nodes
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
[root@hdss7-21 ~]# kubectl get nodes
NAME                STATUS   ROLES         AGE     VERSION
hdss7-21.host.com   Ready    master,node   5m11s   v1.15.2
hdss7-22.host.com   Ready    <none>        5m10s   v1.15.2
4.2 Deploy kube-proxy (hdss7-21, hdss7-22)
Sign the kube-proxy certificate on hdss7-200.host.com
- Create the JSON config used to generate the certificate CSR:
[root@hdss7-200 certs]# vi kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
- Generate the kube-proxy certificate files:
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
- Check the generated certificate files:
[root@hdss7-200 certs]# ll
kube-proxy-client.csr  kube-proxy-client-key.pem  kube-proxy-client.pem  kube-proxy-csr.json
Copy the certificate files to each node (hdss7-21, hdss7-22), then create the configuration:
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kube-proxy-client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kube-proxy-client-key.pem .
Create the configuration (only needs to be run on one node):
- set-cluster
[root@hdss7-21 conf]# kubectl config set-cluster myk8s \
    --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
    --embed-certs=true \
    --server=https://10.4.7.10:7443 \
    --kubeconfig=kube-proxy.kubeconfig
- set-credentials
[root@hdss7-21 conf]# kubectl config set-credentials kube-proxy \
    --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
    --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig
- set-context
[root@hdss7-21 conf]# kubectl config set-context myk8s-context \
    --cluster=myk8s \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig
- use-context
[root@hdss7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
- Copy kube-proxy.kubeconfig to the conf directory on hdss7-22.host.com:
[root@hdss7-22 conf]# scp hdss7-21:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig .
Create the kube-proxy startup script (hdss7-21, hdss7-22)
Load the ipvs kernel modules:
[root@hdss7-21 bin]# lsmod |grep ip_vs
[root@hdss7-21 bin]# vi /root/ipvs.sh
#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done
[root@hdss7-21 bin]# chmod +x /root/ipvs.sh
[root@hdss7-21 bin]# sh /root/ipvs.sh
[root@hdss7-21 bin]# lsmod |grep ip_vs
Create the startup script (on hdss7-22, change --hostname-override to hdss7-22.host.com):
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override hdss7-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig
Make it executable and create the log directory:
[root@hdss7-21 bin]# ls -l /opt/kubernetes/server/bin/conf/|grep kube-proxy
[root@hdss7-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-proxy
Create the supervisor config:
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh  ; the program (relative uses PATH, can take args)
numprocs=1                            ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin  ; directory to cwd to before exec (def no cwd)
autostart=true                        ; start at supervisord start (default: true)
autorestart=true                      ; restart at unexpected quit (default: true)
startsecs=30                          ; number of secs prog must stay running (def. 1)
startretries=3                        ; max # of serial start failures (default 3)
exitcodes=0,2                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                       ; signal used to kill process (default TERM)
stopwaitsecs=10                       ; max num secs to wait b4 SIGKILL (default 10)
user=root                             ; setuid to this UNIX account to run the program
redirect_stderr=true                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false           ; emit events on stdout writes (default false)
Start the service and check:
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
[root@hdss7-21 bin]# yum install ipvsadm -y
[root@hdss7-21 bin]# ipvsadm -Ln
[root@hdss7-21 bin]# kubectl get svc
5 Verify the kubernetes cluster
Create a resource manifest on any node:
[root@hdss7-21 ~]# vi /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.7.9
        ports:
        - containerPort: 80
Apply the manifest and check:
[root@hdss7-21 bin]# kubectl create -f /root/nginx-ds.yaml
[root@hdss7-21 bin]# kubectl get pods
[root@hdss7-21 bin]# kubectl get pods -o wide
[root@hdss7-21 bin]# curl 172.7.21.2
On hdss7-22.host.com:
[root@hdss7-22 bin]# kubectl get pods
[root@hdss7-22 bin]# kubectl get pods -o wide
[root@hdss7-22 bin]# curl 172.7.22.2
Confirm that the cluster is working:
[root@hdss7-21 bin]# kubectl get cs
[root@hdss7-21 bin]# kubectl get node
[root@hdss7-21 bin]# kubectl get pods
