Deploying a Kubernetes Cluster with RKE

1. Operating System

RKE can run on almost any Linux operating system with Docker installed, but Ubuntu 16.04 is officially recommended because most RKE development and testing is done on it.
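RKE drives the whole installation through Docker, so every node needs a supported Docker release before RKE is run. A minimal sketch, assuming the Rancher-provided install script for Docker 19.03 (check the RKE support matrix for the versions your RKE release actually accepts):

## Install a supported Docker release on every node (19.03 is an assumption)
curl -fsSL https://releases.rancher.com/install-docker/19.03.sh | sh
systemctl enable --now docker
docker version --format '{{.Server.Version}}'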

2. Installing a Kubernetes Cluster with RKE

2.1 Installing RKE

1. Install RKE from the binary release

1. Open the RKE Releases page in a browser and download the latest RKE binary for your operating system:

[root@master ~]# mkdir /etc/rke
[root@master ~]# cd /etc/rke/
[root@master rke]# wget https://github.com/rancher/rke/releases/download/v1.0.4/rke_linux-amd64

2. Run the following commands to make the binary executable and move it onto the PATH:

[root@master rke]# chmod +x rke_linux-amd64
[root@master rke]# mv rke_linux-amd64 /usr/bin/rke

3. Confirm that RKE is executable:

[root@master rke]# rke --version
rke version v1.0.4

Additional host preparation

## Disable swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

## Kernel parameters
cat >> /etc/sysctl.conf<<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
vm.swappiness=0
vm.max_map_count=655360
EOF
sysctl -p
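The two bridge-nf-call settings only exist once the br_netfilter kernel module is loaded, so it is worth loading and persisting the module before applying sysctl -p. A small sketch, assuming a distribution that reads /etc/modules-load.d at boot:

## Load br_netfilter so the bridge sysctls above are available
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p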

2.2 Installing Kubernetes

1. Prepare the nodes for the Kubernetes cluster (a quick pre-flight check is sketched after the node list)

Master node:
172.20.100.103 name=cnvs-kubm-100-103  role: [controlplane,worker,etcd] user: k8suser

Worker nodes:
172.20.100.104 name=cnvs-kubnode-100-104  role: [controlplane,worker,etcd] user: k8suser 
172.20.100.105 name=cnvs-kubnode-100-105  role: [controlplane,worker,etcd] user: k8suser
172.20.100.106 name=cnvs-kubnode-100-106  role: [controlplane,worker,etcd] user: k8suser
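Before generating the configuration, it helps to confirm that every node is reachable over SSH and already runs Docker. A hedged pre-flight sketch, reusing the addresses and user listed above:

## Pre-flight check from the host that will run rke (IPs and user are the examples above)
for ip in 172.20.100.103 172.20.100.104 172.20.100.105 172.20.100.106; do
  ssh k8suser@$ip 'hostname; docker version --format "{{.Server.Version}}"'
done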

2. Create the RKE configuration file

There are two simple ways to create cluster.yml:

  • Start from the minimal RKE cluster.yml example and update it for the nodes you will use;
  • Use the rke config wizard to generate the configuration interactively.

2.1 Run the rke config wizard

## Without specifying a file name
rke config
## Specify a file name
rke config --name cluster.yml
## Create an empty template
rke config --empty --name cluster.yml
## Only print the configuration
rke config --print

The steps are as follows:

# Run on all nodes
useradd rancher -G docker && echo "123456" | passwd --stdin rancher
# Run on 103 (the host where rke is executed), then copy the key to every node
su - rancher
ssh-copy-id rancher@node-ip

The resulting cluster.yml looks like this:

nodes:
#master*3
  - address: 172.20.101.169
    user: ptmind
    role:
    - controlplane
    - worker
    - etcd

  - address: 172.20.101.181
    user: ptmind
    role:
    - controlplane
    - worker
    - etcd

  - address: 172.20.101.182
    user: ptmind
    role:
    - controlplane
    - worker
    - etcd

#node*n
  - address: 172.20.100.13
    user: ptmind
    role:
    - worker
    labels: {app: ingress}

  - address: 172.20.100.14
    user: ptmind
    role:
    - worker
    labels: {app: ingress}

  - address: 172.20.100.15
    user: ptmind
    role:
    - worker
    labels: {app: ingress}

  - address: 172.20.100.16
    user: ptmind
    role:
    - worker

  - address: 172.20.100.17
    user: ptmind
    role:
    - worker

  - address: 172.20.100.18
    user: ptmind
    role:
    - worker

  - address: 172.20.100.19
    user: ptmind
    role:
    - worker

  - address: 172.20.100.20
    user: ptmind
    role:
    - worker

  - address: 172.20.100.24
    user: ptmind
    role:
    - worker

  - address: 172.20.100.25
    user: ptmind
    role:
    - worker

services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: true
    retention: "6h"
    creation: "48h"
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: "30000-32767"
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_binds:
      - "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins"
      - "/etc/iscsi:/etc/iscsi"
      - "/usr/libexec/kubernetes/kubelet-plugins/volume/exec:/usr/libexec/kubernetes/kubelet-plugins/volume/exec"
    extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds:
#      - "/lib/modules:/lib/modules"
    extra_env: []
network:
  plugin: canal
  options: {}
  mtu: 0
  node_selector: {}
authentication:
  strategy: x509
  sans:
      - "172.20.101.252"
      - "cnrkedev.pxx.com"
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/coreos-etcd:v3.4.3-rancher1
  alpine: rancher/rke-tools:v0.1.56
  nginx_proxy: rancher/rke-tools:v0.1.56
  cert_downloader: rancher/rke-tools:v0.1.56
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.56
  kubedns: rancher/k8s-dns-kube-dns:1.15.0
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.7.1
  coredns: rancher/coredns-coredns:1.6.5
  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.7.1
  nodelocal: rancher/k8s-dns-node-cache:1.15.7
  kubernetes: rancher/hyperkube:v1.17.5-rancher1
  flannel: rancher/coreos-flannel:v0.11.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher5
  calico_node: rancher/calico-node:v3.13.0
  calico_cni: rancher/calico-cni:v3.13.0
  calico_controllers: rancher/calico-kube-controllers:v3.13.0
  calico_ctl: rancher/calico-ctl:v2.0.0
  calico_flexvol: rancher/calico-pod2daemon-flexvol:v3.13.0
  canal_node: rancher/calico-node:v3.13.0
  canal_cni: rancher/calico-cni:v3.13.0
  canal_flannel: rancher/coreos-flannel:v0.11.0
  canal_flexvol: rancher/calico-pod2daemon-flexvol:v3.13.0
  weave_node: weaveworks/weave-kube:2.5.2
  weave_cni: weaveworks/weave-npc:2.5.2
  pod_infra_container: rancher/pause:3.1
  ingress: rancher/nginx-ingress-controller:nginx-0.25.1-rancher1
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
  metrics_server: rancher/metrics-server:v0.3.6
  windows_pod_infra_container: rancher/kubelet-pause:v0.1.3
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: false
kubernetes_version: ""
private_registries: []
ingress:
  provider: "nginx"
  options: {}
  node_selector: {app: ingress}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
  node_selector: {}
restore:
  restore: false
  snapshot_name: ""
dns: null

 

## Bring up the cluster
~# rke up
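rke up reads cluster.yml from the current directory by default; it can also be pointed at an explicit file, and re-running it against the same file is the normal way to apply later changes (the path assumes the /etc/rke working directory used earlier):

## Equivalent, with the config file named explicitly
rke up --config /etc/rke/cluster.yml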

RKE prints log messages while it creates the Kubernetes cluster:

INFO[0230] [addons] Setting up coredns
INFO[0230] [addons] Saving ConfigMap for addon rke-coredns-addon to Kubernetes
INFO[0231] [addons] Successfully saved ConfigMap for addon rke-coredns-addon to Kubernetes
INFO[0231] [addons] Executing deploy job rke-coredns-addon
INFO[0241] [addons] CoreDNS deployed successfully..
INFO[0241] [dns] DNS provider coredns deployed successfully
......
INFO[0267] [ingress] ingress controller nginx deployed successfully
INFO[0267] [addons] Setting up user addons
INFO[0267] [addons] no user addons defined
INFO[0267] Finished building Kubernetes cluster successfully

When the last line shows Finished building Kubernetes cluster successfully, the cluster has been deployed. As part of the creation process, a kubeconfig file named kube_config_cluster.yml is generated; it is used to interact with the Kubernetes cluster. If you used a different file name for cluster.yml, the kubeconfig file is named kube_config_<RKE_FILE_NAME>.yml.
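For example, the generated kubeconfig can be passed to kubectl directly (the path assumes the /etc/rke working directory used in this walkthrough):

kubectl --kubeconfig /etc/rke/kube_config_cluster.yml get nodes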

3. Save the files

Save copies of the following files in a secure location (a small backup sketch follows the list):

  • cluster.yml: the RKE cluster configuration file.
  • kube_config_cluster.yml: the kubeconfig file for the cluster; it contains credentials with full access to the cluster.
  • cluster.rkestate: the Kubernetes cluster state file; it also contains important credentials for accessing the cluster.
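A minimal backup sketch (the destination directory is just an example):

## Keep an offline copy of the cluster configuration and credentials
mkdir -p /root/rke-backup
cp /etc/rke/cluster.yml /etc/rke/kube_config_cluster.yml /etc/rke/cluster.rkestate /root/rke-backup/
chmod 600 /root/rke-backup/*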

Note

The Kubernetes cluster state file is only created when RKE v0.2.0 or later is used.

4. Configure kubeconfig access

rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/rke/kube_config_cluster.yml $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
## Or set KUBECONFIG globally instead:
# vi /etc/profile
export KUBECONFIG=/etc/rke/kube_config_cluster.yml
# source /etc/profile

5. Install kubectl, the cluster management tool

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubectl
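If the Google yum repository is not reachable from your environment, the kubectl binary can also be downloaded directly; a sketch, with the version chosen to match the cluster deployed above:

## Alternative: install the kubectl binary directly (version is an assumption)
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.2/bin/linux/amd64/kubectl
chmod +x kubectl && mv kubectl /usr/bin/kubectl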

# Verify the cluster

Check the cluster version:
[root@master ~]# kubectl --kubeconfig /etc/rke/kube_config_cluster.yml version
Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.3", GitCommit:"2d3c76f9091b6bec110a5e63777c332469e0cba2", GitTreeState:"clean", BuildDate:"2019-08-19T11:13:54Z", GoVersion:"go1.12.9", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.2", GitCommit:"59603c6e503c87169aea6106f57b9f242f64df89", GitTreeState:"clean", BuildDate:"2020-01-18T23:22:30Z", GoVersion:"go1.13.5", Compiler:"gc", Platform:"linux/amd64"}

List the nodes:
[root@master ~]# kubectl get nodes
NAME             STATUS   ROLES                      AGE   VERSION
172.20.100.103   Ready    controlplane,etcd,worker   52m   v1.17.2
172.20.100.104   Ready    controlplane,etcd,worker   52m   v1.17.2
172.20.100.105   Ready    controlplane,etcd,worker   52m   v1.17.2
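Beyond the node list, a quick look at the system workloads confirms that the canal network plugin, CoreDNS, and the nginx ingress controller add-ons came up:

kubectl get pods --all-namespaces -o wide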

6. Install Helm

## This section uses Helm v2.14.2
wget https://get.helm.sh/helm-v2.14.2-linux-amd64.tar.gz
tar zxvf helm-v2.14.2-linux-amd64.tar.gz
cd linux-amd64
cp helm /usr/bin
## Or, to install Helm v3 instead:
wget https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz
tar xvf helm-v3.6.3-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin/
# helm version
version.BuildInfo{Version:"v3.6.3", GitCommit:"d878d4d45863e42fd5cff6743294a11d28a9abce", GitTreeState:"clean", GoVersion:"go1.13.8"}

###### The following steps apply to Helm v2 only

Use Helm to install the Tiller service on the cluster so it can manage charts. Because RKE enables RBAC by default, we need to use kubectl to create a ServiceAccount and a ClusterRoleBinding so that Tiller has permission to deploy to the cluster.

## Create a ServiceAccount and ClusterRoleBinding for Tiller in the kube-system namespace
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller --output yaml | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' | sed 's@  replicas: 1@  replicas: 1\n  selector: {"matchLabels": {"app": "helm", "name": "tiller"}}@' | kubectl apply -f -
# helm version
Client: &version.Version{SemVer:"v2.14.2", GitCommit:"a8b13cc5ab6a7dbef0a58f5061bcc7c0c61598e7", GitTreeState:"clean"}
Server: &version.Version{SemVer:"v2.14.2", GitCommit:"a8b13cc5ab6a7dbef0a58f5061bcc7c0c61598e7", GitTreeState:"clean"}

7. kubectl command completion

su - root
# Add to /etc/profile:
export KUBECONFIG=/home/rancher/kube_config_rancher-cluster.yml
echo "source <(kubectl completion bash)" >> ~/.bashrc
source /etc/profile
source ~/.bashrc
su - rancher
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
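Optionally, the completion function can also be attached to a short alias, following the standard kubectl bash-completion setup:

echo "alias k=kubectl" >> ~/.bashrc
echo "complete -o default -F __start_kubectl k" >> ~/.bashrc
source ~/.bashrc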

8. Install Rancher with Helm

## Add the chart repository
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
[root@cnvs-kubm-8-103 ~]# helm search rancher
NAME                     CHART VERSION   APP VERSION   DESCRIPTION
rancher-stable/rancher   2.3.6           v2.3.6        Install Rancher Server to manage Kubernetes clusters acro...

8.1 Install cert-manager

Note

You can skip this step if you bring your own certificate files (ingress.tls.source=secret) or use an external TLS load balancer.

cert-manager is only required when using certificates generated by Rancher (ingress.tls.source=rancher) or issued by Let's Encrypt (ingress.tls.source=letsEncrypt).

These instructions come from the official cert-manager documentation.

# Install the CustomResourceDefinition resources

kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml

# **Important:**
# If you are running Kubernetes v1.15 or below, you need to add the
# --validate=false flag to the kubectl apply command above; otherwise you
# will receive validation errors related to the
# x-kubernetes-preserve-unknown-fields field in cert-manager's
# CustomResourceDefinition resources. This is a benign error caused by the
# way kubectl performs resource validation.

# Create the namespace for cert-manager

kubectl create namespace cert-manager

# Add the Jetstack Helm repository

helm repo add jetstack https://charts.jetstack.io

# Update the local Helm chart repository cache

helm repo update

# Install the cert-manager Helm chart

helm install jetstack/cert-manager \
 --name cert-manager \
 --namespace cert-manager \
 --version v0.12.0

After installing cert-manager, you can verify that it is deployed correctly by checking the Pods running in the cert-manager namespace:

[root@cnvs-kubm-8-103 ~]# kubectl get pods --namespace cert-manager
NAME                                       READY   STATUS    RESTARTS   AGE
cert-manager-6bcdf8c5cc-px8pp              1/1     Running   0          33s
cert-manager-cainjector-6659d6844d-mp76w   1/1     Running   0          34s
cert-manager-webhook-547567b88f-srv6c      1/1     Running   0          33s

Using the self-signed certificates generated by Rancher

Because rancher is the default value of ingress.tls.source, we do not specify ingress.tls.source when running the helm install command.

  • Set hostname to the DNS name that points to your load balancer.
  • If you are installing an alpha version, add the --devel option to the Helm command below.
  • To install a specific version of Rancher, use the --version option, for example --version 2.3.6.
 helm install rancher-stable/rancher \
 --name rancher \
 --namespace cattle-system \
 --set hostname=devjprancher.pt.com

Wait for Rancher to finish rolling out.
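One way to wait is to watch the Deployment rollout; the namespace and Deployment name below follow the chart defaults used in the command above:

kubectl -n cattle-system rollout status deploy/rancher
kubectl -n cattle-system get pods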

Nginx configuration file:

#upstream devtestjp-prod-rancher {
#     server   172.19.8.111 max_fails=2 fail_timeout=30s;
#    }
    map $http_upgrade $connection_upgrade {
        default Upgrade;
        ''      close;
    }
server {
        listen 443 ssl;
#        listen 443 ssl http2;
        server_name rancher.pt.com;
        ssl_certificate /usr/local/openresty/nginx/ssl2018/ptmind.com.20201217.pem;
        ssl_certificate_key /usr/local/openresty/nginx/ssl2018/ptmind.com.20201217.key;
            #access_log       /data/nginxlog/k8scs.ptmind.com.log access;

        location / {
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_pass http://172.19.8.111;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            # This allows the ability for the execute shell window to remain open for up to 15 minutes.
            ## Without this parameter, the default is 1 minute and will automatically close.
            proxy_read_timeout 900s;
            proxy_buffering off;
            #office only
            #include /usr/local/openresty/nginx/conf/acl.cfg;
        }
    }

    server {
        listen 80;
        server_name rancher.pt.com;
        return 301 https://$server_name$request_uri;
    }
#stream {
#    upstream devkube_apiserver {
#        least_conn;
#          server 172.19.8.103:6443 weight=5 max_fails=2 fail_timeout=10s;
#          server 172.19.8.104:6443 weight=5 max_fails=2 fail_timeout=10s;
#          server 172.19.8.108:6443 weight=5 max_fails=2 fail_timeout=10s;
#        }
#
#    server {
#        listen        0.0.0.0:16443;
#        proxy_pass   devkube_apiserver;
#        proxy_timeout 10m;
#        proxy_connect_timeout 1s;
#    }
#}
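After dropping in this configuration, test and reload nginx to pick it up; the binary path below assumes the default OpenResty layout implied by the certificate paths above:

/usr/local/openresty/nginx/sbin/nginx -t
/usr/local/openresty/nginx/sbin/nginx -s reload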

Access Rancher in a browser.

### Supplementary: installation with extra parameters (optional)
kubectl create namespace cattle-system
helm install rancher-stable/rancher \
 --name rancher \
 --namespace cattle-system \
 --set auditLog.level=1 \
 --set auditLog.maxAge=3 \
 --set auditLog.maxBackups=2 \
 --set auditLog.maxSize=2000 \
 --set tls=external \
 --set hostname=rancher.pt.com  

References:

https://rancher2.docs.rancher.cn/docs/installation/k8s-install/helm-rancher/_index
https://github.com/helm/helm/issues/6374
http://www.eryajf.net/2723.html
https://blog.51cto.com/michaelkang/2434376
